diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..972401f6 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,4 @@ +# Enable GitHub funding + +github: [kube-vip] + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..c25ee7a4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + - package-ecosystem: gomod + directory: / + schedule: + interval: weekly + - package-ecosystem: docker + directory: / + schedule: + interval: weekly diff --git a/.github/workflows/anchore-syft.yml b/.github/workflows/anchore-syft.yml new file mode 100644 index 00000000..b820853a --- /dev/null +++ b/.github/workflows/anchore-syft.yml @@ -0,0 +1,31 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# This workflow checks out code, builds an image, performs a container image +# scan with Anchore's Syft tool, and uploads the results to the GitHub Dependency +# submission API. + +# For more information on the Anchore sbom-action usage +# and parameters, see https://github.com/anchore/sbom-action. For more +# information about the Anchore SBOM tool, Syft, see +# https://github.com/anchore/syft +name: Anchore Syft SBOM scan + +on: + release: + types: [published] + +jobs: + sbom: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.ref_name }} + - name: Anchore SBOM Action + uses: anchore/sbom-action@v0.15.0 + with: + format: cyclonedx-json diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index a426b169..8dc1d49e 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -2,28 +2,92 @@ name: For each commit and PR on: push: pull_request: - +env: + GO_VERSION: "1.20" jobs: validation: runs-on: ubuntu-latest - env: - CGO_ENABLED: 0 + name: Checks and linters + steps: + - name: Init + run: sudo apt-get update && sudo apt-get install -y build-essential golint + - name: Install golangci-lint + run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.53.3 + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v4 + with: + go-version: ${{ env.GO_VERSION }} + - name: All checks + run: make check + unit-tests: + runs-on: ubuntu-latest + name: Unit tests + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v4 + with: + go-version: ${{ env.GO_VERSION }} + - name: Run tests + run: make unit-tests + integration-tests: + name: Integration tests + runs-on: ubuntu-latest steps: - - name: Init - run: sudo apt-get update && sudo apt-get install -y build-essential golint - name: Checkout code - uses: actions/checkout@v2 - - name: Install Go - uses: actions/setup-go@v2 + uses: actions/checkout@v4 + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v4 with: - go-version: '1.16' - - name: Install golangci-lint - run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.42.1 - - name: checks - run: make check - - name: test docker build - run: make dockerx86Action - - name: Manifest generate - run: ./testing/testing.sh - 
- name: e2e tests - run: DOCKERTAG=action make e2e-tests + go-version: ${{ env.GO_VERSION }} + - name: Run tests + run: make integration-tests + e2e-tests: + runs-on: ubuntu-latest + name: E2E ARP tests + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v4 + with: + go-version: ${{ env.GO_VERSION }} + - name: Build image locally + run: make dockerx86Local + - name: Run tests + run: make e2e-tests + service-e2e-tests: + runs-on: ubuntu-latest + name: E2E service tests + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Set up Go ${{ env.GO_VERSION }} + uses: actions/setup-go@v4 + with: + go-version: ${{ env.GO_VERSION }} + - name: Build image with iptables + run: make dockerx86ActionIPTables + - name: Run tests + run: DOCKERTAG=action make service-tests + image-vul-check: + runs-on: ubuntu-latest + name: Image vulnerability scan + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Build image with iptables + run: make dockerx86ActionIPTables + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@master + with: + image-ref: 'plndr/kube-vip:action' + format: 'table' + exit-code: '1' + ignore-unfixed: true + vuln-type: 'os,library' + severity: 'CRITICAL,HIGH' + diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..f640e08d --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,70 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '17 10 * * 6' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v2 + + # ℹ️ Command-line programs to run using the OS shell. 
+ # πŸ“š https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml index 03891912..e6558d22 100644 --- a/.github/workflows/main.yaml +++ b/.github/workflows/main.yaml @@ -1,43 +1,52 @@ -name: Publish the latest dev image +name: Build and publish main image regularly on: - push: - branches: - - 'master' + schedule: + - cron: '25 0 * * *' + workflow_dispatch: + jobs: - docker: + nightly_build: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 - - - name: Prepare Names - id: prep - run: | - DOCKER_IMAGE=plndr/kube-vip - VERSION=$(echo ${GITHUB_SHA} | cut -c1-8) - TAGS="${DOCKER_IMAGE}:${VERSION}" - TAGS="$TAGS,${DOCKER_IMAGE}:nightly" - echo ::set-output name=tags::${TAGS} - + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Build and push main branch - uses: docker/build-push-action@v2 + - name: Login to Github Packages + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build standard version + id: docker_build + uses: docker/build-push-action@v5 with: context: . - platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} - + tags: >- + plndr/kube-vip:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip:${{ github.ref_name }} + - name: Build iptables version + id: docker_build_iptables + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile_iptables + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip-iptables:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip-iptables:${{ github.ref_name }} - name: Image digest - run: echo ${{ steps.docker_build.outputs.digest }} \ No newline at end of file + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index bc5ecf40..6a6c0e46 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -1,48 +1,56 @@ -name: Publish Releases to Docker Hub +name: Publish Releases to Docker Hub and GitHub Container Registry on: push: tags: - '*' + workflow_dispatch: + jobs: docker: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 - - - name: Prepare Names - id: prep - run: | - DOCKER_IMAGE=plndr/kube-vip - VERSION=${GITHUB_REF#refs/tags/} - TAGS="${DOCKER_IMAGE}:${VERSION},ghcr.io/kube-vip/kube-vip:${VERSION}" - TAGS="$TAGS,${DOCKER_IMAGE}:latest,ghcr.io/kube-vip/kube-vip:latest" - echo ::set-output name=tags::${TAGS} - + uses: actions/checkout@v4 - name: Set up QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to Github Packages - uses: docker/login-action@v1 + uses: docker/login-action@v3 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - - name: Build and push main branch - uses: docker/build-push-action@v2 + - name: Build and push main branch + id: docker_build + uses: docker/build-push-action@v5 with: context: . - platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x push: ${{ github.event_name != 'pull_request' }} - tags: ${{ steps.prep.outputs.tags }} - + tags: >- + plndr/kube-vip:${{ github.ref_name }}, + plndr/kube-vip:latest, + ghcr.io/kube-vip/kube-vip:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip:latest + - name: Build iptables version and push main branch + id: docker_build_iptables + uses: docker/build-push-action@v5 + with: + context: . + file: Dockerfile_iptables + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip-iptables:${{ github.ref_name }}, + plndr/kube-vip-iptables:latest, + ghcr.io/kube-vip/kube-vip-iptables:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip-iptables:latest - name: Image digest run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.gitignore b/.gitignore index 258fffdc..4d94032f 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,7 @@ -./idea/ \ No newline at end of file +.idea +kube-vip +.vscode +bin +testing/e2e/etcd/certs +pkg/etcd/etcd.pid +pkg/etcd/etcd-data diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index cdd706d7..de6d2b63 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -154,7 +154,7 @@ the `$GOPATH`. To develop locally, you can follow these steps: - 1. [Install Go 1.15](https://golang.org/doc/install) + 1. [Install Go 1.19](https://golang.org/doc/install) 2. Checkout your feature branch and `cd` into it. 3. 
To build all Go files and install them under `bin`, run `make bin` 4. To run all Go unit tests, run `make test-unit` diff --git a/Dockerfile b/Dockerfile index f1cd8beb..a5e7de92 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,6 +5,12 @@ FROM --platform=$TARGETPLATFORM gcr.io/spectro-images-public/golang:${BUILDER_GO # FIPS ARG CRYPTO_LIB +ARG BUILDER_GOLANG_VERSION +# First stage: build the executable. +FROM --platform=$TARGETPLATFORM gcr.io/spectro-images-public/golang:${BUILDER_GOLANG_VERSION}-alpine as dev +# FIPS +ARG CRYPTO_LIB + RUN apk add --no-cache git ca-certificates make gcc g++ RUN adduser -D appuser COPY . /src/ diff --git a/Dockerfile_iptables b/Dockerfile_iptables new file mode 100644 index 00000000..10935230 --- /dev/null +++ b/Dockerfile_iptables @@ -0,0 +1,21 @@ +# syntax=docker/dockerfile:experimental + +FROM golang:1.21.4-alpine3.18 as dev +RUN apk add --no-cache git make +RUN adduser -D appuser +COPY . /src/ +WORKDIR /src + +ENV GO111MODULE=on +RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ + --mount=type=cache,sharing=locked,id=goroot,target=/root/.cache/go-build \ + CGO_ENABLED=0 GOOS=linux make build + +FROM alpine:3.18.4 +# Update pkgs and add iptables +RUN apk upgrade && \ + apk add --no-cache iptables + +# Add kube-vip binary +COPY --from=dev /src/kube-vip / +ENTRYPOINT ["/kube-vip"] diff --git a/Makefile b/Makefile index c17c1312..591d98cd 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,19 @@ SHELL := /bin/sh # The name of the executable (default is current directory name) TARGET := kube-vip -.DEFAULT_GOAL: $(TARGET) +.DEFAULT_GOAL := $(TARGET) + +# Fips Flags +FIPS_ENABLE ?= "" + +BUILDER_GOLANG_VERSION ?= 1.21 +BUILD_ARGS = --build-arg CRYPTO_LIB=${FIPS_ENABLE} --build-arg BUILDER_GOLANG_VERSION=${BUILDER_GOLANG_VERSION} + +RELEASE_LOC := release +ifeq ($(FIPS_ENABLE),yes) + CGO_ENABLED := 1 + RELEASE_LOC := release-fips +endif # Fips Flags FIPS_ENABLE ?= "" @@ -17,7 +29,7 @@ ifeq ($(FIPS_ENABLE),yes) endif # These will be provided to the target -VERSION := v0.4.0 +VERSION := v0.6.4 SPECTRO_VERSION ?= 4.0.0-dev BUILD := `git rev-parse HEAD` @@ -30,7 +42,15 @@ ifeq ($(FIPS_ENABLE),yes) LDFLAGS=-ldflags "-s -w -X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -linkmode=external -extldflags -static" endif DOCKERTAG ?= $(VERSION) -REPOSITORY = plndr +REPOSITORY ?= plndr + +IMAGE_NAME := kube-vip +REGISTRY ?= gcr.io/spectro-dev-public/$(USER)/${RELEASE_LOC} +IMG_TAG ?= v0.6.4-spectro-${SPECTRO_VERSION} +IMG ?= ${REGISTRY}/${IMAGE_NAME}:${IMG_TAG} + +RELEASE_REGISTRY := gcr.io/spectro-images-public/release/kube-vip +RELEASE_CONTROLLER_IMG := $(RELEASE_REGISTRY)/$(IMAGE_NAME) IMAGE_NAME := kube-vip REGISTRY ?= gcr.io/spectro-dev-public/$(USER)/${RELEASE_LOC} @@ -65,7 +85,7 @@ fmt: demo: @cd demo - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le --push -t ${IMG} . + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t ${IMG} . @echo New Multi Architecture Docker image created @cd .. @@ -77,6 +97,11 @@ dockerx86Dev: @docker buildx build --platform linux/amd64 --push -t $(REPOSITORY)/$(TARGET):dev . @echo New single x86 Architecture Docker image created +dockerx86Iptables: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 -f ./Dockerfile_iptables --push -t $(REPOSITORY)/$(TARGET):dev . + @echo New single x86 Architecture Docker image created + dockerx86: @-rm ./kube-vip @docker buildx build --platform linux/amd64 --push -t ${IMG} . 
@@ -104,9 +129,14 @@ dockerx86Action: @docker buildx build --platform linux/amd64 --load -t $(REPOSITORY)/$(TARGET):action . @echo New Multi Architecture Docker image created +dockerx86ActionIPTables: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 -f ./Dockerfile_iptables --load -t $(REPOSITORY)/$(TARGET):action . + @echo New Multi Architecture Docker image created + dockerLocal: @-rm ./kube-vip - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le --load -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --load -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New Multi Architecture Docker image created simplify: @@ -118,7 +148,7 @@ check: test -z $(shell gofmt -l main.go | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'" golangci-lint run go vet ./... - + run: install @$(TARGET) @@ -134,5 +164,25 @@ manifests: @./kube-vip manifest daemonset --interface eth0 --vip 192.168.0.1 --bgp --leaderElection --controlplane --services --inCluster --provider-config /etc/cloud-sa/cloud-sa.json > ./docs/manifests/$(VERSION)/kube-vip-bgp-em-ds.yaml @-rm ./kube-vip +unit-tests: + go test ./... + +integration-tests: + go test -tags=integration,e2e -v ./pkg/etcd + e2e-tests: - E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run github.com/onsi/ginkgo/ginkgo -tags=e2e -v -p testing/e2e + E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run github.com/onsi/ginkgo/v2/ginkgo --tags=e2e -v -p ./testing/e2e ./testing/e2e/etcd + +service-tests: + E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run ./testing/e2e/services -Services + +trivy: dockerx86ActionIPTables + docker run -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy:0.47.0 \ + image \ + --format table \ + --exit-code 1 \ + --ignore-unfixed \ + --vuln-type 'os,library' \ + --severity 'CRITICAL,HIGH' \ + $(REPOSITORY)/$(TARGET):action + diff --git a/README.md b/README.md index 21f32b08..9aec8ed4 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,10 @@ # kube-vip -High Availability and Load-Balancing +High Availability and Load-Balancing -![](https://kube-vip.io/kube-vip.png) +![](https://github.com/kube-vip/kube-vip/raw/main/kube-vip.png) + +[![Build and publish main image regularly](https://github.com/kube-vip/kube-vip/actions/workflows/main.yaml/badge.svg)](https://github.com/kube-vip/kube-vip/actions/workflows/main.yaml) ## Overview Kubernetes Virtual IP and Load-Balancer for both control plane and Kubernetes services @@ -30,7 +32,7 @@ Kube-Vip was originally created to provide a HA solution for the Kubernetes cont - Service LoadBalancer address pools per namespace or global - Service LoadBalancer address via (existing network DHCP) - Service LoadBalancer address exposure to gateway via UPNP -- ... manifest generation, vendor API integrations and many nore... +- ... manifest generation, vendor API integrations and many more... ## Why? @@ -56,3 +58,13 @@ All of these would require a separate level of configuration and in some infrast ## Troubleshooting and Feedback Please raise issues on the GitHub repository and as mentioned check the documentation at [https://kube-vip.io](https://kube-vip.io/). + +## Contributing + +Thanks for taking the time to join our community and start contributing! We welcome pull requests. Feel free to dig through the [issues](https://github.com/kube-vip/kube-vip/issues) and jump in. 
+ +:warning: This project has issue compiling on MacOS, please compile it on linux distribution + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=kube-vip/kube-vip&type=Date)](https://star-history.com/#kube-vip/kube-vip&Date) diff --git a/ROADMAP.md b/ROADMAP.md index d83a539e..7f934019 100644 --- a/ROADMAP.md +++ b/ROADMAP.md @@ -1,10 +1,10 @@ # Kube-Vip Roadmap -This document outlines the roadmap for the **kube-vip** project and only covers the technologies within this particular project, other projects that augment or provide additional functionality (such as cloud-providers) may have their own roadmaps in future. The functionality for **kube-vip** has grown either been developed organically or through real-world needs, and this is the first attempt to put into words a plan for the future of **kube-vip** and will additional evolve over time. This means that items listed or detailed here are not neccessarily set in stone and the roadmap can grow/shrink as the project matures. We definitely welcome suggestions and ideas from everyone about the roadmap and **kube-vip** features. Reach us through Issues, Slack or email @kube-vip.io. - +This document outlines the roadmap for the **kube-vip** project and only covers the technologies within this particular project, other projects that augment or provide additional functionality (such as cloud-providers) may have their own roadmaps in future. The functionality for **kube-vip** has grown either been developed organically or through real-world needs, and this is the first attempt to put into words a plan for the future of **kube-vip** and will additional evolve over time. This means that items listed or detailed here are not necessarily set in stone and the roadmap can grow/shrink as the project matures. We definitely welcome suggestions and ideas from everyone about the roadmap and **kube-vip** features. Reach us through Issues, Slack or email @kube-vip.io. + ## Release methodology -The **kube-vip** project attempts to follow a tick-tock release cycle, this typically means that one release will come **packed** with new features where the following release will come with fixes, code sanitation and performane enhancements. +The **kube-vip** project attempts to follow a tick-tock release cycle, this typically means that one release will come **packed** with new features where the following release will come with fixes, code sanitation and performance enhancements. ## Roadmap @@ -13,12 +13,12 @@ The **kube-vip** project offers two main areas of functionality: - HA Kubernetes clusters through a control-plane VIP - Kubernetes `service type:LoadBalancer` -Whilst both of these functions share underlying technologies and code they will have slightly differening roadmaps. +Whilst both of these functions share underlying technologies and code they will have slightly differing roadmaps. ### HA Kubernetes Control Plane -- **Re-implememt LoadBalancing** - due to a previous request the HTTP loadbalancing was removed leaving just HA for the control plane. This functionality will be re-implemented either through the original round-robin HTTP requests or utilising IPVS. -- **Utilise the Kubernetes API to determine additional Control Plane members** - Once a single node cluster is running **kube-vip** could use the API to determine the additional members, at this time a Cluster-API provider needs to drop a static manifest per CP node. 
+- **Re-implement LoadBalancing** - due to a previous request the HTTP loadbalancing was removed leaving just HA for the control plane. This functionality will be re-implemented either through the original round-robin HTTP requests or utilising IPVS. +- **Utilise the Kubernetes API to determine additional Control Plane members** - Once a single node cluster is running **kube-vip** could use the API to determine the additional members, at this time a Cluster-API provider needs to drop a static manifest per CP node. - **Re-evaluate raft** - **kube-vip** is mainly designed to run within a Kubernetes cluster, however it's original design was a raft cluster external to Kubernetes. Unfortunately given some of the upgrade paths identified in things like CAPV moving to leaderElection within Kubernetes became a better idea. ## Kubernetes `service type:LoadBalancer` @@ -29,7 +29,7 @@ Whilst both of these functions share underlying technologies and code they will ## Global **Kube-Vip** items - **Improved metrics** - At this time the scaffolding for monitoring exists, however this needs drastically extending to provide greater observability to what is happening within **kube-vip** -- **Windows support** - The Go SDK didn't support the capability for low-levels sockets for ARP originally, this should be revisted. +- **Windows support** - The Go SDK didn't support the capability for low-levels sockets for ARP originally, this should be revisited. - **Additional BGP features** : - Communities - BFD diff --git a/cmd/kube-vip-config.go b/cmd/kube-vip-config.go deleted file mode 100644 index d3f4ab8d..00000000 --- a/cmd/kube-vip-config.go +++ /dev/null @@ -1,146 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/ghodss/yaml" - "github.com/kube-vip/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - appv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// [sample configuration] - flags -var cliConfig kubevip.Config -var cliConfigLB kubevip.LoadBalancer -var cliLocalPeer string -var cliRemotePeers, cliBackends []string - -func init() { - kubeVipSampleConfig.Flags().StringVar(&cliConfig.Interface, "interface", "eth0", "Name of the interface to bind to") - kubeVipSampleConfig.Flags().StringVar(&cliConfig.VIP, "vip", "192.168.0.1", "The Virtual IP address") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.SingleNode, "singleNode", false, "Start this instance as a single node") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.EnableARP, "arp", true, "Use ARP broadcasts to improve VIP re-allocations") - kubeVipSampleConfig.Flags().StringVar(&cliLocalPeer, "localPeer", "server1:192.168.0.1:10000", "Settings for this peer, format: id:address:port") - kubeVipSampleConfig.Flags().StringSliceVar(&cliRemotePeers, "remotePeers", []string{"server2:192.168.0.2:10000", "server3:192.168.0.3:10000"}, "Comma separated remotePeers, format: id:address:port") - // Load Balancer flags - kubeVipSampleConfig.Flags().BoolVar(&cliConfigLB.BindToVip, "lbBindToVip", false, "Bind example load balancer to VIP") - kubeVipSampleConfig.Flags().StringVar(&cliConfigLB.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)") - kubeVipSampleConfig.Flags().StringVar(&cliConfigLB.Name, "lbName", "Example Load Balancer", "The name of a load balancer instance") - kubeVipSampleConfig.Flags().IntVar(&cliConfigLB.Port, "lbPort", 8080, "Port that 
load balancer will expose on") - kubeVipSampleConfig.Flags().StringSliceVar(&cliBackends, "lbBackends", []string{"192.168.0.1:8080", "192.168.0.2:8080"}, "Comma separated backends, format: address:port") -} - -var kubeVipSampleConfig = &cobra.Command{ - Use: "config", - Short: "Generate a Sample configuration", - Run: func(cmd *cobra.Command, args []string) { - - // // Parse localPeer - // p, err := kubevip.ParsePeerConfig(cliLocalPeer) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfig.LocalPeer = *p - - // // Parse remotePeers - // //Iterate backends - // for i := range cliRemotePeers { - // p, err := kubevip.ParsePeerConfig(cliRemotePeers[i]) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfig.RemotePeers = append(cliConfig.RemotePeers, *p) - // } - - // //Iterate backends - // for i := range cliBackends { - // b, err := kubevip.ParseBackendConfig(cliBackends[i]) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfigLB.Backends = append(cliConfigLB.Backends, *b) - // } - - // Add the basic Load-Balancer to the configuration - cliConfig.LoadBalancers = append(cliConfig.LoadBalancers, cliConfigLB) - - err := cliConfig.ParseFlags(cliLocalPeer, cliRemotePeers, cliBackends) - if err != nil { - _ = cmd.Help() - log.Fatalln(err) - } - - err = kubevip.ParseEnvironment(&cliConfig) - if err != nil { - _ = cmd.Help() - log.Fatalln(err) - } - - cliConfig.PrintConfig() - }, -} - -var kubeVipSampleManifest = &cobra.Command{ - Use: "manifest", - Short: "Generate a Sample kubernetes manifest", - Run: func(cmd *cobra.Command, args []string) { - // Generate the sample manifest specification - p := &appv1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "kube-vip", - Namespace: "kube-system", - }, - Spec: appv1.PodSpec{ - Containers: []appv1.Container{ - { - Name: "kube-vip", - Image: fmt.Sprintf("ghcr.io/kube-vip/kube-vip:%s", Release.Version), - SecurityContext: &appv1.SecurityContext{ - Capabilities: &appv1.Capabilities{ - Add: []appv1.Capability{ - "NET_ADMIN", - "SYS_TIME", - }, - }, - }, - Args: []string{ - "start", - "-c", - "/etc/kube-vip/config.yaml", - }, - VolumeMounts: []appv1.VolumeMount{ - { - Name: "config", - MountPath: "/etc/kube-vip/", - }, - }, - }, - }, - Volumes: []appv1.Volume{ - { - Name: "config", - VolumeSource: appv1.VolumeSource{ - HostPath: &appv1.HostPathVolumeSource{ - Path: "/etc/kube-vip/", - }, - }, - }, - }, - HostNetwork: true, - }, - } - - b, _ := yaml.Marshal(p) - fmt.Print(string(b)) - }, -} diff --git a/cmd/kube-vip-kubeadm.go b/cmd/kube-vip-kubeadm.go index abf322a2..0646e097 100644 --- a/cmd/kube-vip-kubeadm.go +++ b/cmd/kube-vip-kubeadm.go @@ -1,16 +1,12 @@ package cmd import ( - "context" "fmt" "os" - "github.com/kube-vip/kube-vip/pkg/k8s" "github.com/kube-vip/kube-vip/pkg/kubevip" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // kubeadm adds two subcommands for managing a vip during a kubeadm init/join @@ -91,39 +87,6 @@ var kubeKubeadmJoin = &cobra.Command{ log.Fatalf("Unable to find file [%s]", kubeConfigPath) } - // We will use kubeconfig in order to find all the master nodes - // use the current context in kubeconfig - clientset, err := k8s.NewClientset(kubeConfigPath, false, "") - if err != nil { - log.Fatal(err.Error()) - } - - opts := metav1.ListOptions{} - opts.LabelSelector = "node-role.kubernetes.io/master" - 
nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), opts) - if err != nil { - log.Fatal(err.Error()) - } - // Iterate over all nodes that are masters and find the details to build a peer list - for x := range nodes.Items { - // Get hostname and address - var nodeAddress, nodeHostname string - for y := range nodes.Items[x].Status.Addresses { - switch nodes.Items[x].Status.Addresses[y].Type { - case corev1.NodeHostName: - nodeHostname = nodes.Items[x].Status.Addresses[y].Address - case corev1.NodeInternalIP: - nodeAddress = nodes.Items[x].Status.Addresses[y].Address - } - } - - newPeer, err := kubevip.ParsePeerConfig(fmt.Sprintf("%s:%s:%d", nodeHostname, nodeAddress, 10000)) - if err != nil { - panic(err.Error()) - } - initConfig.RemotePeers = append(initConfig.RemotePeers, *newPeer) - - } // Generate manifest and print cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version, inCluster) fmt.Println(cfg) diff --git a/cmd/kube-vip-manifests.go b/cmd/kube-vip-manifests.go index ec01e096..703d6f51 100644 --- a/cmd/kube-vip-manifests.go +++ b/cmd/kube-vip-manifests.go @@ -13,7 +13,7 @@ import ( // - Pod spec manifest, mainly used for a static pod (kubeadm) // - Daemonset manifest, mainly used to run kube-vip as a deamonset within Kubernetes (k3s/rke) -//var inCluster bool +// var inCluster bool var taint bool func init() { @@ -46,7 +46,7 @@ var kubeManifestPod = &cobra.Command{ } // The control plane has a requirement for a VIP being specified - if initConfig.EnableControlPane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { + if initConfig.EnableControlPlane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } @@ -71,11 +71,11 @@ var kubeManifestDaemon = &cobra.Command{ // TODO - check for certain things VIP/interfaces // The control plane has a requirement for a VIP being specified - if initConfig.EnableControlPane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { + if initConfig.EnableControlPlane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } - cfg := kubevip.GenerateDeamonsetManifestFromConfig(&initConfig, Release.Version, inCluster, taint) + cfg := kubevip.GenerateDaemonsetManifestFromConfig(&initConfig, Release.Version, inCluster, taint) fmt.Println(cfg) }, diff --git a/cmd/kube-vip-start.go b/cmd/kube-vip-start.go index 1a17ce2d..51f34e7f 100644 --- a/cmd/kube-vip-start.go +++ b/cmd/kube-vip-start.go @@ -12,7 +12,6 @@ import ( var startConfig kubevip.Config var startConfigLB kubevip.LoadBalancer var startLocalPeer, startKubeConfigPath string -var startRemotePeers, startBackends []string var inCluster bool func init() { @@ -30,14 +29,13 @@ func init() { kubeVipStart.Flags().BoolVar(&startConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader") kubeVipStart.Flags().BoolVar(&startConfig.EnableARP, "arp", false, "Use ARP broadcasts to improve VIP re-allocations") kubeVipStart.Flags().StringVar(&startLocalPeer, "localPeer", "server1:192.168.0.1:10000", "Settings for this peer, format: id:address:port") - kubeVipStart.Flags().StringSliceVar(&startRemotePeers, "remotePeers", []string{"server2:192.168.0.2:10000", "server3:192.168.0.3:10000"}, "Comma separated remotePeers, format: id:address:port") + // Load Balancer flags 
kubeVipStart.Flags().BoolVar(&startConfigLB.BindToVip, "lbBindToVip", false, "Bind example load balancer to VIP") kubeVipStart.Flags().StringVar(&startConfigLB.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)") kubeVipStart.Flags().StringVar(&startConfigLB.Name, "lbName", "Example Load Balancer", "The name of a load balancer instance") kubeVipStart.Flags().IntVar(&startConfigLB.Port, "lbPort", 8080, "Port that load balancer will expose on") - kubeVipStart.Flags().IntVar(&startConfigLB.BackendPort, "lbBackEndPort", 6443, "A port that all backends may be using (optional)") - kubeVipStart.Flags().StringSliceVar(&startBackends, "lbBackends", []string{"192.168.0.1:8080", "192.168.0.2:8080"}, "Comma separated backends, format: address:port") + kubeVipStart.Flags().StringVar(&startConfigLB.ForwardingMethod, "lbForwardingMethod", "local", "The forwarding method of a load balancer instance") // Cluster configuration kubeVipStart.Flags().StringVar(&startKubeConfigPath, "kubeConfig", "/etc/kubernetes/admin.conf", "The path of a kubernetes configuration file") @@ -58,20 +56,16 @@ var kubeVipStart = &cobra.Command{ // If a configuration file is loaded, then it will overwrite flags - if configPath != "" { - c, err := kubevip.OpenConfig(configPath) - if err != nil { - log.Fatalf("%v", err) - } - startConfig = *c - } - // parse environment variables, these will overwrite anything loaded or flags err = kubevip.ParseEnvironment(&startConfig) if err != nil { log.Fatalln(err) } + if startConfig.LeaderElectionType == "etcd" { + log.Fatalln("Leader election with etcd not supported in start command, use manager") + } + newCluster, err := cluster.InitCluster(&startConfig, disableVIP) if err != nil { log.Fatalf("%v", err) @@ -96,7 +90,7 @@ var kubeVipStart = &cobra.Command{ if startConfig.EnableBGP { log.Info("Starting the BGP server to advertise VIP routes to VGP peers") - bgpServer, err = bgp.NewBGPServer(&startConfig.BGPConfig) + bgpServer, err = bgp.NewBGPServer(&startConfig.BGPConfig, nil) if err != nil { log.Fatalf("%v", err) } @@ -116,6 +110,5 @@ var kubeVipStart = &cobra.Command{ } } } - }, } diff --git a/cmd/kube-vip.go b/cmd/kube-vip.go index 0013344a..ad363518 100644 --- a/cmd/kube-vip.go +++ b/cmd/kube-vip.go @@ -5,16 +5,19 @@ import ( "fmt" "net/http" "os" + "strings" "time" - "github.com/kube-vip/kube-vip/pkg/kubevip" - "github.com/kube-vip/kube-vip/pkg/manager" - "github.com/kube-vip/kube-vip/pkg/packet" - "github.com/kube-vip/kube-vip/pkg/vip" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/vishvananda/netlink" + + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/manager" + "github.com/kube-vip/kube-vip/pkg/vip" ) // Path to the configuration file @@ -35,7 +38,7 @@ var disableVIP bool // ConfigMap name within a Kubernetes cluster var configMap string -// Configure the level of loggin +// Configure the level of logging var logLevel uint32 // Provider Config @@ -48,8 +51,10 @@ var Release struct { } // Structs used via the various subcommands -var initConfig kubevip.Config -var initLoadBalancer kubevip.LoadBalancer +var ( + initConfig kubevip.Config + initLoadBalancer kubevip.LoadBalancer +) // Points to a kubernetes configuration file var kubeConfigPath string @@ -60,28 +65,35 @@ var kubeVipCmd = &cobra.Command{ } func init() { - // Basic flags 
kubeVipCmd.PersistentFlags().StringVar(&initConfig.Interface, "interface", "", "Name of the interface to bind to") kubeVipCmd.PersistentFlags().StringVar(&initConfig.ServicesInterface, "serviceInterface", "", "Name of the interface to bind to (for services)") kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIP, "vip", "", "The Virtual IP address") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPSubnet, "vipSubnet", "", "The Virtual IP address subnet e.g. /32 /24 /8 etc..") + + kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "32", "The CIDR range for the virtual IP address") // todo: deprecate + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP") kubeVipCmd.PersistentFlags().IntVar(&initConfig.Port, "port", 6443, "Port for the VIP") - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableARP, "arp", false, "Enable Arp for Vip changes") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableARP, "arp", false, "Enable Arp for VIP changes") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableWireguard, "wireguard", false, "Enable Wireguard for services VIPs") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableRoutingTable, "table", false, "Enable Routing Table for services VIPs") // LoadBalancer flags kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLoadBalancer, "enableLoadBalancer", false, "enable loadbalancing on the VIP with IPVS") kubeVipCmd.PersistentFlags().IntVar(&initConfig.LoadBalancerPort, "lbPort", 6443, "loadbalancer port for the VIP") - + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LoadBalancerForwardingMethod, "lbForwardingMethod", "local", "loadbalancer forwarding method") kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DDNS, "ddns", false, "use Dynamic DNS + DHCP to allocate VIP for address") // Clustering type (leaderElection) kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLeaderElection, "leaderElection", false, "Use the Kubernetes leader election mechanism for clustering") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LeaderElectionType, "leaderElectionType", "kubernetes", "Defines the backend to run the leader election: kubernetes or etcd. 
Defaults to kubernetes.") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LeaseName, "leaseName", "plndr-cp-lock", "Name of the lease that is used for leader election") kubeVipCmd.PersistentFlags().IntVar(&initConfig.LeaseDuration, "leaseDuration", 5, "Length of time a Kubernetes leader lease can be held for") kubeVipCmd.PersistentFlags().IntVar(&initConfig.RenewDeadline, "leaseRenewDuration", 3, "Length of time a Kubernetes leader can attempt to renew its lease") kubeVipCmd.PersistentFlags().IntVar(&initConfig.RetryPeriod, "leaseRetry", 1, "Number of times the host will retry to hold a lease") - // Packet flags + // Equinix Metal flags kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableMetal, "metal", false, "This will use the Equinix Metal API (requires the token ENV) to update the EIP <-> VIP") kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalAPIKey, "metalKey", "", "The API token for authenticating with the Equinix Metal API") kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProject, "metalProject", "", "The name of project already created within Equinix Metal") @@ -89,7 +101,6 @@ func init() { kubeVipCmd.PersistentFlags().StringVar(&initConfig.ProviderConfig, "provider-config", "", "The path to a provider configuration") // BGP flags - kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "32", "The CIDR range for the virtual IP address") kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableBGP, "bgp", false, "This will enable BGP support within kube-vip") kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.RouterID, "bgpRouterID", "", "The routerID for the bgp server") kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIF, "sourceIF", "", "The source interface for bgp peering (not to be used with sourceIP)") @@ -102,8 +113,8 @@ func init() { kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.BGPPeers, "bgppeers", []string{}, "Comma separated BGP Peer, format: address:as:password:multihop") kubeVipCmd.PersistentFlags().StringVar(&initConfig.Annotations, "annotations", "", "Set Node annotations prefix for parsing") - // Control plane specific flags - kubeVipCmd.PersistentFlags().StringVarP(&initConfig.Namespace, "namespace", "n", "kube-system", "The configuration map defined within the cluster") + // Namespace for kube-vip + kubeVipCmd.PersistentFlags().StringVarP(&initConfig.Namespace, "namespace", "n", "kube-system", "The namespace for the configmap defined within the cluster") // Manage logging kubeVipCmd.PersistentFlags().Uint32Var(&logLevel, "log", 4, "Set the level of logging") @@ -111,12 +122,30 @@ func init() { // Service flags kubeVipService.Flags().StringVarP(&configMap, "configMap", "c", "plndr", "The configuration map defined within the cluster") + // Routing Table flags + kubeVipCmd.PersistentFlags().IntVar(&initConfig.RoutingTableID, "tableID", 198, "The routing table used for all table entries") + kubeVipCmd.PersistentFlags().IntVar(&initConfig.RoutingTableType, "tableType", 0, "The type of route that will be added to the routing table") + // Behaviour flags - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableControlPane, "controlplane", false, "Enable HA for control plane, hybrid mode") - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServices, "services", false, "Enable Kubernetes services, hybrid mode") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableControlPlane, "controlplane", false, "Enable HA for control plane") + 
kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServices, "services", false, "Enable Kubernetes services") + + // Extended behaviour flags + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServicesElection, "servicesElection", false, "Enable leader election per kubernetes service") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.LoadBalancerClassOnly, "lbClassOnly", false, "Enable load balancing only for services with LoadBalancerClass \"kube-vip.io/kube-vip-class\"") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LoadBalancerClassName, "lbClassName", "kube-vip.io/kube-vip-class", "Name of load balancer class for kube-VIP, defaults to \"kube-vip.io/kube-vip-class\"") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServiceSecurity, "onlyAllowTrafficServicePorts", false, "Only allow traffic to service ports, others will be dropped, defaults to false") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableNodeLabeling, "enableNodeLabeling", false, "Enable leader node labeling with \"kube-vip.io/has-ip=\", defaults to false") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.ServicesLeaseName, "servicesLeaseName", "plndr-svcs-lock", "Name of the lease that is used for leader election for services (in arp mode)") // Prometheus HTTP Server - kubeVipCmd.PersistentFlags().StringVar(&initConfig.PrometheusHTTPServer, "promethuesHTTPServer", ":2112", "Host and port used to expose Prometheus metrics via an HTTP server") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.PrometheusHTTPServer, "prometheusHTTPServer", ":2112", "Host and port used to expose Prometheus metrics via an HTTP server") + + // Etcd + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.CAFile, "etcdCACert", "", "Verify certificates of TLS-enabled secure servers using this CA bundle file") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.ClientCertFile, "etcdCert", "", "Identify secure client using this TLS certificate file") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.ClientKeyFile, "etcdKey", "", "Identify secure client using this TLS key file") + kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.Etcd.Endpoints, "etcdEndpoints", nil, "Etcd member endpoints") kubeVipCmd.AddCommand(kubeKubeadm) kubeVipCmd.AddCommand(kubeManifest) @@ -125,11 +154,6 @@ func init() { kubeVipCmd.AddCommand(kubeVipService) kubeVipCmd.AddCommand(kubeVipStart) kubeVipCmd.AddCommand(kubeVipVersion) - - // Sample commands - kubeVipSample.AddCommand(kubeVipSampleConfig) - kubeVipSample.AddCommand(kubeVipSampleManifest) - } // Execute - starts the command parsing process @@ -171,6 +195,10 @@ var kubeVipService = &cobra.Command{ log.Fatalln(err) } + if err := initConfig.CheckInterface(); err != nil { + log.Fatalln(err) + } + // User Environment variables as an option to make manifest clearer envConfigMap := os.Getenv("vip_configmap") if envConfigMap != "" { @@ -195,7 +223,6 @@ var kubeVipManager = &cobra.Command{ Use: "manager", Short: "Start the kube-vip manager", Run: func(cmd *cobra.Command, args []string) { - // parse environment variables, these will overwrite anything loaded or flags err := kubevip.ParseEnvironment(&initConfig) if err != nil { @@ -203,22 +230,95 @@ var kubeVipManager = &cobra.Command{ } // Set the logging level for all subsequent functions - log.SetLevel(log.Level(logLevel)) + log.SetLevel(log.Level(initConfig.Logging)) + + // Welome messages + log.Infof("Starting kube-vip.io [%s]", Release.Version) + log.Debugf("Build kube-vip.io [%s]", Release.Build) + + // start 
prometheus server + if initConfig.PrometheusHTTPServer != "" { + go servePrometheusHTTPServer(cmd.Context(), PrometheusHTTPServerConfig{ + Addr: initConfig.PrometheusHTTPServer, + }) + } - if initConfig.Interface == "" { - log.Infof("No interface is specified for VIP in config, auto-detecting default Interface") - defaultIF, err := vip.GetDefaultGatewayInterface() + // Determine the kube-vip mode + var mode string + if initConfig.EnableARP { + mode = "ARP" + } + + if initConfig.EnableBGP { + mode = "BGP" + } + + if initConfig.EnableWireguard { + mode = "Wireguard" + } + + if initConfig.EnableRoutingTable { + mode = "Routing Table" + } + + // Provide configuration to output/logging + log.Infof("namespace [%s], Mode: [%s], Features(s): Control Plane:[%t], Services:[%t]", initConfig.Namespace, mode, initConfig.EnableControlPlane, initConfig.EnableServices) + + // End if nothing is enabled + if !initConfig.EnableServices && !initConfig.EnableControlPlane { + log.Fatalln("no features are enabled") + } + + // If we're using wireguard then all traffic goes through the wg0 interface + if initConfig.EnableWireguard { + if initConfig.Interface == "" { + // Set the vip interface to the wireguard interface + initConfig.Interface = "wg0" + } + + log.Infof("configuring Wireguard networking") + l, err := netlink.LinkByName(initConfig.Interface) if err != nil { - _ = cmd.Help() - log.Fatalf("unable to detect default interface -> [%v]", err) + if strings.Contains(err.Error(), "Link not found") { + log.Warnf("interface \"%s\" doesn't exist, attempting to create wireguard interface", initConfig.Interface) + err = netlink.LinkAdd(&netlink.Wireguard{LinkAttrs: netlink.LinkAttrs{Name: initConfig.Interface}}) + if err != nil { + log.Fatalln(err) + } + l, err = netlink.LinkByName(initConfig.Interface) + if err != nil { + log.Fatalln(err) + } + } + } + err = netlink.LinkSetUp(l) + if err != nil { + log.Fatalln(err) } - initConfig.Interface = defaultIF.Name - log.Infof("kube-vip will bind to interface [%s]", initConfig.Interface) - } - go servePrometheusHTTPServer(cmd.Context(), PrometheusHTTPServerConfig{ - Addr: initConfig.PrometheusHTTPServer, - }) + } else { // if we're not using Wireguard then we'll need to use an actual interface + // Check if the interface needs auto-detecting + if initConfig.Interface == "" { + log.Infof("No interface is specified for VIP in config, auto-detecting default Interface") + defaultIF, err := vip.GetDefaultGatewayInterface() + if err != nil { + _ = cmd.Help() + log.Fatalf("unable to detect default interface -> [%v]", err) + } + initConfig.Interface = defaultIF.Name + log.Infof("kube-vip will bind to interface [%s]", initConfig.Interface) + + go func() { + if err := vip.MonitorDefaultInterface(context.TODO(), defaultIF); err != nil { + log.Fatalf("crash: %s", err.Error()) + } + }() + } + } + // Perform a check on th state of the interface + if err := initConfig.CheckInterface(); err != nil { + log.Fatalln(err) + } // User Environment variables as an option to make manifest clearer envConfigMap := os.Getenv("vip_configmap") @@ -226,10 +326,10 @@ var kubeVipManager = &cobra.Command{ configMap = envConfigMap } - // If Packet is enabled and there is a provider configuration passed + // If Equinix Metal is enabled and there is a provider configuration passed if initConfig.EnableMetal { if providerConfig != "" { - providerAPI, providerProject, err := packet.GetPacketConfig(providerConfig) + providerAPI, providerProject, err := equinixmetal.GetPacketConfig(providerConfig) if err != nil { 
log.Fatalf("%v", err) } @@ -264,10 +364,20 @@ func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerC var err error mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` + kube-vip + +

<h1>kube-vip Metrics</h1>
+ <p><a href="/metrics">Metrics</a></p>
+ + `)) + }) srv := &http.Server{ - Addr: config.Addr, - Handler: mux, + Addr: config.Addr, + Handler: mux, + ReadHeaderTimeout: 2 * time.Second, } go func() { @@ -276,11 +386,11 @@ func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerC } }() - log.Printf("server started") + log.Printf("prometheus HTTP server started") <-ctx.Done() - log.Printf("server stopped") + log.Printf("prometheus HTTP server stopped") ctxShutDown, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer func() { @@ -294,5 +404,4 @@ func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerC if err == http.ErrServerClosed { err = nil } - } diff --git a/demo/README.md b/demo/README.md new file mode 100644 index 00000000..10595094 --- /dev/null +++ b/demo/README.md @@ -0,0 +1,46 @@ +# Demo client-server + +This contains some example code to determine how long "failovers" are taking within kube-vip, the server component should live within the cluster and the client should be externally. + +## Deploy the server + +Simply apply the manifest to a working cluster that has kube-vip deployed: + +``` +kubectl apply -f ./demo/server/deploy.yaml +``` + +Retrieve the loadBalancer IP that is fronting the service: + +``` +kubectl get svc demo-service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +demo-service LoadBalancer 10.104.18.147 192.168.0.217 10002:32529/UDP 117m +``` + +## Connect the client + +From elsewhere, clone the kube-vip repository and connect the client to the server endpoint (loadBalancer IP) with the following command: + +``` +go run ./demo/client/main.go -address= +``` + +You will only see output when the client has reconcilled the connection to a pod beneath the service, where it will print the timestamp to reconnection along with the time in milliseconds it took: + +``` +15:58:35.916952 3008 +15:58:45.947506 2005 +15:58:57.983151 3007 +15:59:08.013450 2005 +15:59:20.046491 3008 +15:59:30.076341 2507 +15:59:42.110747 3008 +``` + +## Kill some pods to test + +On a machine or control plane that has `kubectl` and has the credentials to speak to the cluster we will run a command to find the demo pod and kill it every 10 seconds: + +`while true ; do kubectl delete pod $(kubectl get pods | grep -v NAME | grep vip| awk '{ print $1 }'); sleep 10; done` + diff --git a/demo/client/main.go b/demo/client/main.go new file mode 100644 index 00000000..2a4e9e9c --- /dev/null +++ b/demo/client/main.go @@ -0,0 +1,71 @@ +package main + +import ( + "bufio" + "flag" + "fmt" + "net" + "time" +) + +const udpdata = "a3ViZS12aXAK=kube-vip" + +func main() { + address := flag.String("address", "127.0.0.1", "The address of the server") + port := flag.Int("port", 10002, "the port of the server") + interval := flag.Float64("interval", 1000, "Interval in milliseconds") + flag.Parse() + var errorTime time.Time + var errorOccurred bool + for { + p := make([]byte, 2048) + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", *address, *port)) + if err != nil { + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + continue + } + + err = conn.SetDeadline(time.Now().Add(time.Duration(*interval) * time.Millisecond)) + if err != nil { + //fmt.Printf("Connectivity error [%v]", err) + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + if err = conn.Close(); err != nil { + fmt.Printf("Error closing connection [%v]", err) + } + continue + } + + _, err = fmt.Fprint(conn, udpdata) + if err != nil { + fmt.Printf("Error writing data [%v]", err) + } + + _, 
err = bufio.NewReader(conn).Read(p) + if err != nil { + //fmt.Printf("read error %v\n", err) + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + if err = conn.Close(); err != nil { + fmt.Printf("Error closing connection [%v]", err) + } + continue + } + time.Sleep(time.Duration(*interval) * time.Millisecond) + if errorOccurred { + finishTime := time.Since(errorTime) + //fmt.Printf("connectivity reconciled in %dms\n", finishTime.Milliseconds()) + //t :=time.Now().Format("15:04:05.000000") + fmt.Printf("%s %d\n", time.Now().Format("15:04:05.000000"), finishTime.Milliseconds()) + + errorOccurred = false + } + } +} diff --git a/demo/Dockerfile b/demo/server/Dockerfile similarity index 90% rename from demo/Dockerfile rename to demo/server/Dockerfile index a11a5074..96e85ad6 100644 --- a/demo/Dockerfile +++ b/demo/server/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:experimental -FROM golang:1.13-alpine as dev +FROM golang:1.19-alpine as dev RUN apk add --no-cache git ca-certificates RUN adduser -D appuser COPY main.go /src/ @@ -13,4 +13,4 @@ RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ FROM scratch COPY --from=dev /src/demo / -CMD ["/demo"] \ No newline at end of file +CMD ["/demo"] diff --git a/demo/Makefile b/demo/server/Makefile similarity index 94% rename from demo/Makefile rename to demo/server/Makefile index 1804f5e8..d02d5e3d 100644 --- a/demo/Makefile +++ b/demo/server/Makefile @@ -45,7 +45,7 @@ fmt: @gofmt -l -w $(SRC) docker: - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . 
@echo New Multi Architecture Docker image created simplify: diff --git a/demo/deploy.yaml b/demo/server/deploy.yaml similarity index 58% rename from demo/deploy.yaml rename to demo/server/deploy.yaml index cb29e4e4..19327764 100644 --- a/demo/deploy.yaml +++ b/demo/server/deploy.yaml @@ -6,7 +6,7 @@ metadata: app: kube-vip-demo name: kube-vip-demo spec: - replicas: 3 + replicas: 1 selector: matchLabels: app: kube-vip-demo @@ -30,4 +30,25 @@ spec: ports: - containerPort: 10001 - containerPort: 10002 -status: {} \ No newline at end of file +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: demo-service + namespace: default + labels: + app: demo-service + annotations: + kube-vip.io/egress: "true" +spec: + type: LoadBalancer + # "Local" preserves the client source IP and avoids a second hop for + # LoadBalancer and NodePort + externalTrafficPolicy: Local + ports: + - name: demo-udp + port: 10002 + protocol: UDP + selector: + app: kube-vip-demo \ No newline at end of file diff --git a/demo/go.mod b/demo/server/go.mod similarity index 100% rename from demo/go.mod rename to demo/server/go.mod diff --git a/demo/main.go b/demo/server/main.go similarity index 97% rename from demo/main.go rename to demo/server/main.go index 24aab371..89c2928d 100644 --- a/demo/main.go +++ b/demo/server/main.go @@ -60,7 +60,7 @@ func main() { fmt.Println("error: ", err) } - ServerConn.WriteTo(buf[0:n], addr) + ServerConn.WriteTo(buf[0:n]) } } } diff --git a/docs/architecture/index.md b/docs/architecture/index.md index b59e7fcf..360bfbbc 100644 --- a/docs/architecture/index.md +++ b/docs/architecture/index.md @@ -1,52 +1,52 @@ -# **kube-vip** architecture +# Kube-Vip Architecture -This section covers two parts of the architecture: +This section covers two parts of the architecture: -1. The technical capabilities of `kube-vip` -2. The components to build a load-balancing service within [Kubernetes](https://kubernetes.io) +1. The technical capabilities of `kube-vip`. +2. The components to build a load balancing service within Kubernetes. -The `kube-vip` project is designed to provide both a highly available networking endpoint and load-balancing functionality for underlying networking services. The project was originally designed for the purpose of providing a resilient control-plane for Kubernetes, it has since expanded to provide the same functionality for applications within a Kubernetes cluster. +The `kube-vip` project is designed to provide both a highly available networking endpoint and load balancing functionality for underlying networking services. The project was originally designed for the purpose of providing a resilient control plane for Kubernetes but has since expanded to provide the same functionality for Service resources within a Kubernetes cluster. -Additionally `kube-vip` is designed to be lightweight and **multi-architecture**, all of the components are built for Linux but are also built for both `x86` and `armv7`,`armhvf`,`ppc64le`. This means that `kube-vip` will run fine in **bare-metal**, **virtual** and **edge** (raspberry pi or small arm SoC devices). +Additionally, `kube-vip` is designed to be lightweight and multi-architecture. All of the components are built for Linux on `x86`, `armv7`, `armhvf`, `ppc64le` and `s390x` architectures. This means that `kube-vip` will run fine in bare metal, virtual, and edge (Raspberry Pi or small ARM SoC) use cases. 
## Technologies
-There are a number of technologies or functional design choices that provide high-availability or networking functions as part of a VIP/Load-balancing solution.
+There are a number of technologies or functional design choices that provide high availability and networking functions as part of a VIP/load balancing solution.
### Cluster
-The `kube-vip` service builds a multi-node or multi-pod cluster to provide High-Availability. In ARP mode a leader is elected, this node will inherit the Virtual IP and become the leader of the load-balancing within the cluster, whereas with BGP all nodes will advertise the VIP address.
+The `kube-vip` service builds a multi-node or multi-pod cluster to provide high availability. In ARP mode, a leader is elected which will inherit the virtual IP and become the leader of the load balancing within the cluster, whereas with BGP all nodes will advertise the VIP address.
-When using ARP or layer2 it will use [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection)
+When using ARP or [Layer 2](https://osi-model.com/data-link-layer/), it will use [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection).
### Virtual IP
-The leader within the cluster will assume the **vip** and will have it bound to the selected interface that is declared within the configuration. When the leader changes it will evacuate the **vip** first or in failure scenarios the **vip** will be directly assumed by the next elected leader.
+The leader within the cluster will assume the VIP and will have it bound to the selected interface that is declared within the configuration. When the leader changes, it will evacuate the VIP first, or in failure scenarios the VIP will be directly assumed by the next elected leader.
-When the **vip** moves from one host to another any host that has been using the **vip** will retain the previous `vip <-> MAC address` mapping until the ARP (Address resolution protocol) expires the old entry (typically 30 seconds) and retrieves a new `vip <-> MAC` mapping. This can be improved using Gratuitous ARP broadcasts (when enabled), this is detailed below.
+When the VIP moves from one host to another, any host that has been using the VIP will retain the previous VIP-to-MAC address mapping until the old ARP entry expires (typically within 30 seconds) and a new mapping is retrieved. This can be improved by using [Gratuitous ARP](https://wiki.wireshark.org/Gratuitous_ARP) broadcasts when enabled (detailed below).
### ARP
-(Optional) The `kube-vip` can be configured to broadcast a [gratuitous arp](https://wiki.wireshark.org/Gratuitous_ARP) that will typically immediately notify all local hosts that the `vip <-> MAC` has changed.
+`kube-vip` can optionally be configured to broadcast a Gratuitous ARP that will typically immediately notify all local hosts that the VIP-to-MAC address mapping has changed.
-**Below** we can see that the failover is typically done within a few seconds as the ARP broadcast is recieved.
+Below we can see that the failover is typically done within a few seconds as the ARP broadcast is received.
``` 64 bytes from 192.168.0.75: icmp_seq=146 ttl=64 time=0.258 ms 64 bytes from 192.168.0.75: icmp_seq=147 ttl=64 time=0.240 ms 92 bytes from 192.168.0.70: Redirect Host(New addr: 192.168.0.75) Vr HL TOS Len ID Flg off TTL Pro cks Src Dst - 4 5 00 0054 bc98 0 0000 3f 01 3d16 192.168.0.95 192.168.0.75 +4 5 00 0054 bc98 0 0000 3f 01 3d16 192.168.0.95 192.168.0.75 Request timeout for icmp_seq 148 92 bytes from 192.168.0.70: Redirect Host(New addr: 192.168.0.75) Vr HL TOS Len ID Flg off TTL Pro cks Src Dst - 4 5 00 0054 75ff 0 0000 3f 01 83af 192.168.0.95 192.168.0.75 +4 5 00 0054 75ff 0 0000 3f 01 83af 192.168.0.95 192.168.0.75 Request timeout for icmp_seq 149 92 bytes from 192.168.0.70: Redirect Host(New addr: 192.168.0.75) Vr HL TOS Len ID Flg off TTL Pro cks Src Dst - 4 5 00 0054 2890 0 0000 3f 01 d11e 192.168.0.95 192.168.0.75 +4 5 00 0054 2890 0 0000 3f 01 d11e 192.168.0.95 192.168.0.75 Request timeout for icmp_seq 150 64 bytes from 192.168.0.75: icmp_seq=151 ttl=64 time=0.245 ms @@ -54,84 +54,84 @@ Request timeout for icmp_seq 150 ### Load Balancing -Kube-Vip has the capability to provide a HA address for both the Kubernetes control plane and for a Kubernetes service, it recently implemented support for "actual" load-balancing for the control plane to distribute API requests across control-plane nodes. +`kube-vip` has the capability to provide a high availability address for both the Kubernetes control plane and for a Kubernetes Service. As of v0.4.0, `kube-vip` implements support for true load balancing for the control plane to distribute API requests across control plane nodes. -#### Kubernetes Service Load-Balancing +#### Kubernetes Service Load Balancing -The following is required in the kube-vip yaml to enable services: +The following is required in the `kube-vip` manifest to enable Service of type `LoadBalancer`: -``` - - name: svc_enable - value: "true" +```yaml +- name: svc_enable + value: "true" ``` -This section details the flow of events in order for `kube-vip` to advertise a Kubernetes service: +This section details the flow of events in order for `kube-vip` to advertise a Kubernetes Service: -1. An end user exposes a application through Kubernetes as a LoadBalancer => `kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx` -2. Within the Kubernetes cluster a service object is created with the `svc.Spec.Type = LoadBalancer` -3. A controller (typically a Cloud Controller) has a loop that "watches" for services of the type `LoadBalancer`. -4. The controller now has the responsibility of providing an IP address for this service along with doing anything that is network specific for the environment where the cluster is running. -5. Once the controller has an IP address it will update the service `svc.Spec.LoadBalancerIP` with it's new IP address. -6. The `kube-vip` pods also implement a "watcher" for services that have a `svc.Spec.LoadBalancerIP` address attached. -7. When a new service appears `kube-vip` will start advertising this address to the wider network (through BGP/ARP) which will allow traffic to come into the cluster and hit the service network. -8. Finally `kube-vip` will update the service status so that the API reflects that this LoadBalancer is ready. This is done by updating the `svc.Status.LoadBalancer.Ingress` with the VIP address. +1. An end user exposes an application through Kubernetes as a Service type `LoadBalancer`. 
For example, imperatively using `kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx`
+2. Within the Kubernetes cluster, a Service object is created with the `spec.type` set to `LoadBalancer`.
+3. A controller (typically a [Cloud Controller](/usage/cloud-provider)) has a loop that "watches" for Services of the type `LoadBalancer`.
+4. The controller now has the responsibility of providing an IP address for this Service along with doing anything that is network specific for the environment where the cluster is running.
+5. Once the controller has an IP address, it will update the Service fields `metadata.annotations["kube-vip.io/loadbalancerIPs"]` and `spec.loadBalancerIP` with the IP address. `spec.loadBalancerIP` is deprecated as of Kubernetes 1.24 and will not be updated in future releases.
+6. `kube-vip` Pods implement a "watcher" for Services that have a `metadata.annotations["kube-vip.io/loadbalancerIPs"]` address attached. If the annotation is not present, `kube-vip` will fall back to checking `spec.loadBalancerIP`.
+7. When a new Service appears, `kube-vip` will start advertising this address to the wider network (through BGP/ARP), which will allow traffic to come into the cluster and hit the Service network.
+8. Finally, `kube-vip` will update the Service status so that the API reflects the object is ready. This is done by updating the `status.loadBalancer.ingress` with the VIP address.
-#### Control Plane Load-Balancing (> 0.4)
+#### Control Plane Load Balancing
-**NOTE** in it's initial release IPVS load-balancing is configured for having the VIP in the same subnet as the control-plane nodes, NAT based load-balancing will appear soon.
+As of `kube-vip` v0.4.0, IPVS load balancing expects the VIP to be in the same subnet as the control plane nodes. NAT-based load balancing will follow in a later release.
-To enable control-plane load balancing, the following is required in the kube-vip yaml to enable control plane load-balancing.
+To enable control plane load balancing using IPVS, the environment variable `lb_enable` is required in the `kube-vip` manifest:
+```yaml
+- name : lb_enable
+  value: "true"
```
- - name : lb_enable
-  value: "true"
-```
-The load balancing is provided through IPVS (IP Virtual Server) and provides a layer-4 (TCP Port) based round-robin across all of the control plane nodes. By default the load balancer will listen on the default 6443 port as the Kubernetes API server.
-**Note:** The IPVS virtual server lives in kernel space and doesn't create an "actual" service that listens on port 6443, this allows the kernel to parse packets before they're sent to an actual TCP port. This is important to know because it means we don't have any port conflicts having the IPVS load-balancer listening on the same port as the API server on the same host.
-The load balancer port can be customised with the following snippet in the yaml.
+Load balancing is provided through IPVS (IP Virtual Server), which performs a Layer 4 (TCP-based) round-robin across all of the control plane nodes. By default, the load balancer will listen on port 6443, the same default port as the Kubernetes API server. The IPVS virtual server lives in kernel space and doesn't create an "actual" service that listens on port 6443. This allows the kernel to parse packets before they're sent to an actual TCP port. This is important to know because it means there are no port conflicts when the IPVS load balancer listens on the same port as the API server on the same host.
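+
+Because the virtual server is implemented in the kernel, a quick way to confirm that IPVS support is loaded on a control plane node (an illustrative check, not something `kube-vip` requires you to run) is:
+
+```sh
+# The ip_vs modules are loaded by the kernel when IPVS is in use
+lsmod | grep ip_vs
+```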
-``` - - name: lb_port - value: "6443" +The load balancer port can be customised by changing the `lb_port` environment variable in the `kube-vip` manifest: + +```yaml +- name: lb_port + value: "6443" ``` -**How it works!** +##### How it works -Once the `lb_enable` is set to true kube-vip will do the following: +Once the `lb_enable` variable is set to `true`, `kube-vip` will do the following: - - In Layer 2 it will create an IPVS service on the leader - - In Layer 3 all nodes will create an IPVS service - - It will start a Kubernetes node watcher for nodes with the control plane label - - It will add/delete them as they're added and removed from the cluster +- In Layer 2 it will create an IPVS service on the leader. +- In Layer 3 all nodes will create an IPVS service. +- It will start a Kubernetes node watcher for nodes with the control plane label. +- It will add/delete them as they're added and removed from the cluster. -#### Debugging control plane load-balancing +#### Debugging control plane load balancing - In order to inspect what is happening we will need to install the `ipvsadm` tool. +In order to inspect and debug traffic, install the `ipvsadm` tool. ##### View the configuration The command `sudo ipvsadm -ln` will display the load balancer configuration. -``` - $ sudo ipvsadm -ln +```sh +$ sudo ipvsadm -ln IP Virtual Server version 1.2.1 (size=4096) Prot LocalAddress:Port Scheduler Flags - -> RemoteAddress:Port Forward Weight ActiveConn InActConn +-> RemoteAddress:Port Forward Weight ActiveConn InActConn TCP 192.168.0.40:6443 rr - -> 192.168.0.41:6443 Local 1 4 0 - -> 192.168.0.42:6443 Local 1 3 0 - -> 192.168.0.43:6443 Local 1 3 0 +-> 192.168.0.41:6443 Local 1 4 0 +-> 192.168.0.42:6443 Local 1 3 0 +-> 192.168.0.43:6443 Local 1 3 0 ``` ##### Watch things interact with the API server -The command `watch sudo ipvsadm -lnc` will auto-refresh the connections to the load-balancer. +The command `watch sudo ipvsadm -lnc` will auto-refresh the connections to the load balancer. -``` +```sh $ watch sudo ipvsadm -lnc -... + sudo ipvsadm -lnc k8s01: Tue Nov 9 11:39:39 2021 @@ -151,8 +151,7 @@ TCP 14:49 ESTABLISHED 192.168.0.40:56040 192.168.0.40:6443 192.168.0.42:6443 ## Components within a Kubernetes Cluster -The `kube-vip` kubernetes load-balancer requires a number of components in order to function: - -- The Kube-Vip Cloud Provider -> [https://github.com/kube-vip/kube-vip-cloud-provider](https://github.com/kube-vip/kube-vip-cloud-provider) -- The Kube-Vip Deployment -> [https://github.com/kube-vip/kube-vip](https://github.com/kube-vip/kube-vip) +The `kube-vip` Kubernetes load balancer requires a number of components in order to function: +- [Kube-Vip Cloud Provider](https://github.com/kube-vip/kube-vip-cloud-provider) +- [Kube-Vip Deployment](https://github.com/kube-vip/kube-vip) diff --git a/docs/flags/index.md b/docs/flags/index.md index 519e9d95..613b3786 100644 --- a/docs/flags/index.md +++ b/docs/flags/index.md @@ -2,95 +2,100 @@ ## Flags -These flags are typically used in manifest generation. +These flags are typically used in the `kube-vip` manifest generation process. 
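+
+For example, several of these flags are combined when generating a static Pod manifest (a sketch; `kube-vip` here is the container alias described on the installation pages, with `$INTERFACE` and `$VIP` set for your environment):
+
+```sh
+# Generate a static Pod manifest with control plane and Service support in ARP mode
+kube-vip manifest pod \
+    --interface $INTERFACE \
+    --address $VIP \
+    --controlplane \
+    --services \
+    --arp \
+    --leaderElection
+```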
-| Category | Flag | Usage | Notes | -|--------------|------|-------|-------| -|**Troubleshooting** |||| -| |`--log`|default 4|Set to `5` for debugging logs| -|**Mode** |||| -| |`--controlPlane`|Enables `kube-vip` control-plane functionality|| -| |`--services`|Enables `kube-vip` to watch services of type:LoadBalancer|| -|**Vip Config** |||| -| |`--arp`|Enables ARP brodcasts from Leader|| -| |`--bgp`|Enables BGP peering from `kube-vip`|| -| |`--vip`|``|(deprecated)| -| |`--address`|`` or ``|| -| |`--interface`|``|| -| |`--leaderElection`|Enables Kubernetes LeaderElection|Used by ARP, as only the leader can broadcast| -| |`--enableLoadBalancer`|Enables IPVS load balancer|| -| |`--lbPort`|6443|The port that the api server will load-balanced on| -|**Services**|||| -| |`--cidr`|Defaults "32"|Used when advertising BGP addresses (typically as `x.x.x.x/32`)| -|**Kubernetes**|||| -| |`--inCluster`|Defaults to looking inside the Pod for the token|| -| |`--taint`|Enables a taint, stopping control plane daemonset being on workers|| -|**LeaderElection**|||| -| |`--leaseDuration`|default 5|Seconds a lease is held for| -| |`--leaseRenewDuration`|default 3|Seconds a leader can attempt to renew the lease| -| |`--leaseRetry`|default 1|Number of times the leader will hold the lease for| -| |`--namespace`|"kube-vip"|The namespace where the lease will reside| -|**BGP**|||| -| |`--bgpRouterID`|``|Typically the address of the local node| -| |`--localAS`|default 65000|The AS we peer from| -| |`--bgppeers`|``|Comma seperate list of BGP peers| -| |`--peerAddress`|``|Address of a single BGP Peer| -| |`--peerAS`|default 65000|AS of a single BGP Peer| -| |`--peerPass`|""| Password to work with a single BGP Peer| -| |`--multiHop`|Enables eBGP MultiHop| Enable multiHop with a single BGP Peer| -| |`--sourceif`|Source Interface| Determines which interface BGP should peer _from_| -| |`--sourceip`|Source Address| Determines which IP address BGP should peer _from_| -| |`--annotaions`|``|Startup will be paused until the node annotaions contain the BGP configuration| -|**Equinix Metal**|||(May be deprecated)| -| |`--metal`|Enables Equinix Metal API calls|| -| |`--metalKey`|Equinix Metal API token|| -| |`--metalProject`|Equinix Metal Project (Name)|| -| |`--metalProjectID`|Equinix Metal Project (UUID)|| -| |`--provider-config`|Path to the Equinix Metal provider configuration|Requires the Equinix Metal CCM| +| Category | Flag | Usage | Notes | +| ------------------- | ---------------------- | ------------------------------------------------------------------ | ------------------------------------------------------------------------------- | +| **Troubleshooting** | | | | +| | `--log` | default 4 | Set to `5` for debugging logs | +| **Mode** | | | | +| | `--controlplane` | Enables `kube-vip` control plane functionality | | +| | `--services` | Enables `kube-vip` to watch services of type `LoadBalancer` | | +| **VIP Config** | | | | +| | `--arp` | Enables ARP broadcasts from Leader | | +| | `--bgp` | Enables BGP peering from `kube-vip` | | +| | `--vip` | `` | (deprecated) | +| | `--address` | `` or `` | | +| | `--interface` | Linux interface on the node | | +| | `--leaderElection` | Enables Kubernetes LeaderElection | Used by ARP, as only the leader can broadcast | +| | `--enableLoadBalancer` | Enables IPVS load balancer | `kube-vip` β‰₯ 0.4.0 | +| | `--lbPort` | 6443 | The port that the api server will load-balanced on | +| | `--lbForwardingMethod` | Select the forwarding method (default local) | The IPVS forwarding method (local, 
masquerade, tunnel, direct, bypass) | +| **Services** | | | | +| | `--serviceInterface` | "" | Defines an optional different interface to bind services too | +| | `--cidr` | Defaults "32" | Used when advertising BGP addresses (typically as `x.x.x.x/32`) | +| **Kubernetes** | | | | +| | `--inCluster` | Required for `kube-vip` as DaemonSet. | Runs `kube-vip` with a ServiceAccount called `kube-vip`. | +| | `--taint` | Required for `kube-vip` as DaemonSet. | Adds node affinity rules forcing `kube-vip` Pods to run on control plane. | +| **LeaderElection** | | | | +| | `--leaseDuration` | default 5 | Seconds a lease is held for | +| | `--leaseRenewDuration` | default 3 | Seconds a leader can attempt to renew the lease | +| | `--leaseRetry` | default 1 | Number of times the leader will hold the lease for | +| | `--namespace` | "kube-vip" | The namespace where the lease will reside | +| **BGP** | | | | +| | `--bgpRouterID` | `` | Typically the address of the local node | +| | `--localAS` | default 65000 | The AS we peer from | +| | `--bgppeers` | `` | Comma separated list of BGP peers | +| | `--peerAddress` | `` | Address of a single BGP Peer | +| | `--peerAS` | default 65000 | AS of a single BGP Peer | +| | `--peerPass` | "" | Password to work with a single BGP Peer | +| | `--multiHop` | Enables eBGP MultiHop | Enable multiHop with a single BGP Peer | +| | `--sourceif` | Source Interface | Determines which interface BGP should peer _from_ | +| | `--sourceip` | Source Address | Determines which IP address BGP should peer _from_ | +| | `--annotations` | `` | Startup will be paused until the node annotations contain the BGP configuration | +| **Equinix Metal** | | | (May be deprecated) | +| | `--metal` | Enables Equinix Metal API calls | | +| | `--metalKey` | Equinix Metal API token | | +| | `--metalProject` | Equinix Metal Project (Name) | | +| | `--metalProjectID` | Equinix Metal Project (UUID) | | +| | `--provider-config` | Path to the Equinix Metal provider configuration | Requires the Equinix Metal CCM | ## Environment Variables -These environment variables are usually part of a kube-vip manifest. +These environment variables are usually part of a `kube-vip` manifest and used when running the `kube-vip` Pod. More environment variables can be read through the `pkg/kubevip/config_envvar.go` file. 
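+
+To see which of these variables a running deployment was configured with (a sketch, assuming `kube-vip` is deployed as the `kube-vip-ds` DaemonSet in `kube-system`, as in the example manifest elsewhere in these docs):
+
+```sh
+# Print the environment variables set on the kube-vip container
+kubectl -n kube-system get daemonset kube-vip-ds \
+    -o jsonpath='{.spec.template.spec.containers[0].env}'
+```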
-| Category | Environment Variable | Usage | Notes | -|--------------|------|-------|-------| -|**Troubleshooting** |||| -| |`vip_loglevel`|default 4|Set to `5` for debugging logs| -|**Mode** |||| -| |`cp_enable`|Enables `kube-vip` control-plane functionality|| -| |`svc_enable`|Enables `kube-vip` to watch services of `type:LoadBalancer`|| -|**Vip Config** |||| -| |`vip_arp`|Enables ARP brodcasts from Leader|| -| |`bgp_enable`|Enables BGP peering from `kube-vip`|| -| |`vip_address`|``|(deprecated)| -| |`address`|`` or ``|| -| |`vip_interface`|``|| -| |`vip_leaderelection`|Enables Kubernetes LeaderElection|Used by ARP, as only the leader can broadcast| -| |`lb_enable`|Enables IPVS LoadBalancer|Will watch Kubernetes nodes and add them to the IPVS load-balancer| -| |`lb_port`|6443|The IPVS port that will be used to load-balance control plane requests| -|**Services**|||| -| |`vip_cidr`|Defaults "32"|Used when advertising BGP addresses (typically as `x.x.x.x/32`)| -|**LeaderElection**|||| -| |`vip_leaseduration`|default 5|Seconds a lease is held for| -| |`vip_renewdeadline`|default 3|Seconds a leader can attempt to renew the lease| -| |`vip_retryperiod`|default 1|Number of times the leader will hold the lease for| -| |`cp_namespace`|"kube-vip"|The namespace where the lease will reside| -|**BGP**|||| -| |`bgp_routerid`|``|Typically the address of the local node| -| |`bgp_as`|default 65000|The AS we peer from| -| |`bgp_peers`|``|Comma seperate list of BGP peers| -| |`bgp_peeraddress`|``|Address of a single BGP Peer| -| |`bgp_peeras`|default 65000|AS of a single BGP Peer| -| |`bgp_peerpass`|""| Password to work with a single BGP Peer| -| |`bgp_multihop`|Enables eBGP MultiHop| Enable multiHop with a single BGP Peer| -| |`bgp_sourceif`|Source Interface| Determines which interface BGP should peer _from_| -| |`bgp_sourceip`|Source Address| Determines which IP address BGP should peer _from_| -| |`annotaions`|``|Startup will be paused until the node annotaions contain the BGP configuration| -|**Equinix Metal**|||(May be deprecated)| -| |`vip_packet`|Enables Equinix Metal API calls|| -| |`PACKET_AUTH_TOKEN`|Equinix Metal API token|| -| |`vip_packetproject`|Equinix Metal Project (Name)|| -| |`vip_packetprojectid`|Equinix Metal Project (UUID)|| -| |`provider_config`|Path to the Equinix Metal provider configuration|Requires the Equinix Metal CCM| \ No newline at end of file +| Category | Environment Variable | Usage | Notes | +| ------------------- | ---------------------- |-------------------------------------------------------------|---------------------------------------------------------------------------------| +| **Troubleshooting** | | | | +| | `vip_loglevel` | default 4 | Set to `5` for debugging logs | +| **Mode** | | | | +| | `cp_enable` | Enables `kube-vip` control plane functionality | | +| | `svc_enable` | Enables `kube-vip` to watch Services of type `LoadBalancer` | | +| **VIP Config** | | | | +| | `vip_arp` | Enables ARP broadcasts from Leader | | +| | `bgp_enable` | Enables BGP peering from `kube-vip` | | +| | `vip_address` | `` | (deprecated) | +| | `address` | `` or `` | | +| | `vip_interface` | `` | | +| | `vip_leaderelection` | Enables Kubernetes LeaderElection | Used by ARP, as only the leader can broadcast | +| | `lb_enable` | Enables IPVS LoadBalancer | `kube-vip` β‰₯ 0.4.0. 
Adds nodes to the IPVS load balancer | +| | `lb_port` | 6443 | The IPVS port that will be used to load-balance control plane requests | +| | `lb_fwdmethod` | Select the forwarding method (default local) | The IPVS forwarding method (local, masquerade, tunnel, direct, bypass) | +| **Services** | | | | +| | `vip_servicesinterface`| "" | Defines an optional different interface to bind services too | +| | `vip_cidr` | Defaults "32" | Used when advertising BGP addresses (typically as `x.x.x.x/32`) | +| **LeaderElection** | | | | +| | `vip_leaseduration` | default 5 | Seconds a lease is held for | +| | `vip_renewdeadline` | default 3 | Seconds a leader can attempt to renew the lease | +| | `vip_retryperiod` | default 1 | Number of times the leader will hold the lease for | +| | `cp_namespace` | "kube-vip" | The namespace where the lease will reside | +| **BGP** | | | | +| | `bgp_routerid` | `` | Typically the address of the local node | +| | `bgp_routerinterface` | Interface name | Used to associate the `routerID` with the control plane's interface. | +| | `bgp_as` | default 65000 | The AS we peer from | +| | `bgp_peers` | `` | Comma separated list of BGP peers (IPv6 addresses should be enclosed with `[]`) | +| | `bgp_peeraddress` | `` | Address of a single BGP Peer | +| | `bgp_peeras` | default 65000 | AS of a single BGP Peer | +| | `bgp_peerpass` | "" | Password to work with a single BGP Peer | +| | `bgp_multihop` | Enables eBGP MultiHop | Enable multiHop with a single BGP Peer | +| | `bgp_sourceif` | Source Interface | Determines which interface BGP should peer _from_ | +| | `bgp_sourceip` | Source Address | Determines which IP address BGP should peer _from_ | +| | `annotations` | `` | Startup will be paused until the node annotations contain the BGP configuration | +| **Equinix Metal** | | | (May be deprecated) | +| | `vip_packet` | Enables Equinix Metal API calls | | +| | `PACKET_AUTH_TOKEN` | Equinix Metal API token | | +| | `vip_packetproject` | Equinix Metal Project (Name) | | +| | `vip_packetprojectid` | Equinix Metal Project (UUID) | | +| | `provider_config` | Path to the Equinix Metal provider configuration | Requires the Equinix Metal CCM | diff --git a/docs/hybrid/daemonset/index.md b/docs/hybrid/daemonset/index.md index 0f8130ac..46139e5b 100644 --- a/docs/hybrid/daemonset/index.md +++ b/docs/hybrid/daemonset/index.md @@ -1,13 +1,13 @@ # Kube-Vip as a daemonset -In Hybrid mode `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. +In Hybrid mode `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `service.metadata.annotations["kube-vip.io/loadbalancerIPs"]` or `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. **Note about Daemonsets** -The "hybrid" mode is now the default mode in `kube-vip` from `0.2.3` onwards, and allows both modes to be enabled at the same time. +The "hybrid" mode is now the default mode in `kube-vip` from `0.2.3` onwards, and allows both modes to be enabled at the same time. 
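+
+Once a controller has populated the address, a quick way to see the value `kube-vip` will act on (a sketch, assuming a Service named `nginx` in the default namespace):
+
+```sh
+# Show the annotation (preferred) and the deprecated spec field that kube-vip watches
+kubectl get svc nginx -o yaml | grep -E 'kube-vip.io/loadbalancerIPs|loadBalancerIP'
+```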
-If the Kubernetes installer allows for adding a Virtual IP as an additional [SAN](https://en.wikipedia.org/wiki/Subject_Alternative_Name) to the API server certificate then we can apply `kube-vip` to the cluster once the first node has been brought up.
+If the Kubernetes installer allows for adding a Virtual IP as an additional [SAN](https://en.wikipedia.org/wiki/Subject_Alternative_Name) to the API server certificate, then we can apply `kube-vip` to the cluster once the first node has been brought up.
Unlike generating the static manifest there are a few more things that may need configuring, this page will cover most scenarios.
@@ -43,7 +43,7 @@ The easiest method to generate a manifest is using the container itself, below w
### BGP Example
-This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses. 
+This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses.
**Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma seperate list in the format of `address:AS:password:multihop`.
@@ -136,7 +136,7 @@ spec:
## Equinix Metal Overview (using the [Equinix Metal CCM](https://github.com/packethost/packet-ccm))
-The below example is for running `type:LoadBalancer` services on worker nodes only and will create a daemonset that will run `kube-vip`. 
+The below example is for running `type:LoadBalancer` services on worker nodes only and will create a daemonset that will run `kube-vip`.
**NOTE** This use-case requires the [Equinix Metal CCM](https://github.com/packethost/packet-ccm) to be installed and that the cluster/kubelet is configured to use an "external" cloud provider.
@@ -153,7 +153,23 @@ kube-vip manifest daemonset \
### Troubleshooting
-If `kube-vip` has been sat waiting for a long time then you may need to investigate that the annotations have been applied correctly by doing running the `describe` on the node:
+If `kube-vip` has been waiting for a long time, you may need to verify that the annotations have been applied correctly by running `describe` on the node.
+As of Equinix Metal's CCM v3.3.0, the annotations format was changed. This means you should expect either of the following:
+
+1. Equinix Metal's CCM v3.3.0 onwards:
+
+```
+kubectl describe node k8s.bgp02
+...
+Annotations:        kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock
+                    node.alpha.kubernetes.io/ttl: 0
+                    metal.equinix.com/bgp-peers-0-node-asn: 65000
+                    metal.equinix.com/bgp-peers-0-peer-asn: 65530
+                    metal.equinix.com/bgp-peers-0-peer-ip: x.x.x.x
+                    metal.equinix.com/bgp-peers-0-src-ip: x.x.x.x
+```
+
+2.
Equinix Metal's CCM before v3.0.0: ``` kubectl describe node k8s.bgp02 diff --git a/docs/hybrid/index.md b/docs/hybrid/index.md index c1823e94..f83fbea9 100644 --- a/docs/hybrid/index.md +++ b/docs/hybrid/index.md @@ -1,45 +1,46 @@ -# Using `kube-vip` in Hybrid Mode +# Using kube-vip in Hybrid Mode We can deploy kube-vip in two different methods, which completely depends on your use-case and method for installing Kubernetes: - Static Pods (hybrid) - Daemonset (hybrid, requires taint) -## **Prerequisites** +## Prerequisites In order for `kube-vip` to be able to speak with the Kubernetes API server, we need to be able to resolve the hostname within the pod. In order to ensure this will work as expected the `/etc/hosts` file should have the `hostname` of the server within it. The `/etc/hosts` file is passed into the running container and will ensure that the pod isn't "confused" by any Kubernetes networking. ## Kubernetes Services (`type:LoadBalancer`) -To learn more about how `kube-vip` in hybrid works with the LoadBalancer services within a kubernetes cluster the documentation is [here](./services/). To get `kube-vip` deployed read on ! +To learn more about how `kube-vip` in hybrid works with the LoadBalancer services within a kubernetes cluster the documentation is [here](./services/). To get `kube-vip` deployed read on! ## Static Pods -Static pods are a Kubernetes pod that is ran by the `kubelet` on a single node, and is **not** managed by the Kubernetes cluster itself. This means that whilst the pod can appear within Kubernetes it can't make use of a variety of kubernetes functionality (such as the kubernetes token or `configMaps`). The static pod approach is primarily required for [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/), this is due to the sequence of actions performed by `kubeadm`. Ideally we want `kube-vip` to be part of the kubernetes cluster, for various bits of functionality we also need `kube-vip` to provide a HA virtual IP as part of the installation. +Static pods are a Kubernetes pod that is ran by the `kubelet` on a single node, and is **not** managed by the Kubernetes cluster itself. This means that whilst the pod can appear within Kubernetes it can't make use of a variety of kubernetes functionality (such as the kubernetes token or `configMaps`). The static pod approach is primarily required for [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/), this is due to the sequence of actions performed by `kubeadm`. Ideally we want `kube-vip` to be part of the kubernetes cluster, for various bits of functionality we also need `kube-vip` to provide a HA virtual IP as part of the installation. The sequence of events for this to work follows: + 1. Generate a `kube-vip` manifest in the static pods manifest folder 2. Run `kubeadm init`, this generates the manifests for the control plane and wait to connect to the VIP 3. The `kubelet` will parse and execute all manifest, including the `kube-vip` manifest 4. `kube-vip` starts and advertises our VIP -5. The `kubeadm init` finishes succesfully. +5. The `kubeadm init` finishes successfully. ## Daemonset Other Kubernetes distributions can bring up a Kubernetes cluster, without depending on a VIP (BUT they are configured to support one). A prime example of this would be k3s, that can be configured to start and also sign the certificates to allow incoming traffic to a virtual IP. 
Given we don't need the VIP to exist **before** the cluster, we can bring up the k3s node(s) and then add `kube-vip` as a daemonset for all control plane nodes. -# Deploying `kube-vip` +## Deploying `kube-vip` The simplest method for generating the Kubernetes manifests is with `kube-vip` itself.. The subcommand `manifest pod|daemonset` can be used to generate specific types of Kubernetes manifests for use in a cluster. These subcommands can be configured with additional flags to enable/disable BGP/ARP/LeaderElection and a host of other options. Both Examples will use the same Architecture: -#### Infrastructure architecture +## Infrastructure architecture The infrastructure for our example HA Kubernetes cluster is as follows: -| Node | Address | -|----------------|------------| +| Node | Address | +| -------------- | --------- | | VIP | 10.0.0.40 | | controlPlane01 | 10.0.0.41 | | controlPlane02 | 10.0.0.42 | @@ -56,45 +57,45 @@ The details for creating a static pod are available [here](./static/) When using `kube-vip` as a daemonset the details are available [here](./daemonset/) -# Kube-Vip flag reference - -| Category | Flag | Usage | Notes | -|--------------|------|-------|-------| -|**Mode** |||| -| |`--controlPlane`|Enables `kube-vip` control-plane functionality|| -| |`--services`|Enables `kube-vip` to watch services of type:LoadBalancer|| -|**Vip Config** |||| -| |`--arp`|Enables ARP brodcasts from Leader|| -| |`--bgp`|Enables BGP peering from `kube-vip`|| -| |`--vip`|``|(deprecated)| -| |`--address`|`` or ``|| -| |`--interface`|``|| -| |`--leaderElection`|Enables Kubernetes LeaderElection|Used by ARP, as only the leader can broadcast| -|**Services**|||| -| |`--cidr`|Defaults "32"|Used when advertising BGP addresses (typically as `x.x.x.x/32`)| -|**Kubernetes**|||| -| |`--inCluster`|Defaults to looking inside the Pod for the token|| -| |`--taint`|Enables a taint, stopping control plane daemonset being on workers|| -|**LeaderElection**|||| -| |`--leaseDuration`|default 5|Seconds a lease is held for| -| |`--leaseRenewDuration`|default 3|Seconds a leader can attempt to renew the lease| -| |`--leaseRetry`|default 1|Number of times the leader will hold the lease for| -| |`--namespace`|"kube-vip"|The namespace where the lease will reside| -|**BGP**|||| -| |`--bgpRouterID`|``|Typically the address of the local node| -| |`--localAS`|default 65000|The AS we peer from| -| |`--bgppeers`|``|Comma seperate list of BGP peers| -| |`--peerAddress`|``|Address of a single BGP Peer| -| |`--peerAS`|default 65000|AS of a single BGP Peer| -| |`--peerPass`|""| Password to work with a single BGP Peer| -| |`--multiHop`|Enables eBGP MultiHop| Enable multiHop with a single BGP Peer| -| |`--annotaions`|``|Startup will be paused until the node annotaions contain the BGP configuration| -|**Equinix Metal**|||(May be deprecated)| -| |`--metal`|Enables Equinix Metal API calls|| -| |`--metalKey`|Equinix Metal API token|| -| |`--metalProject`|Equinix Metal Project (Name)|| -| |`--metalProjectID`|Equinix Metal Project (UUID)|| -| |`--provider-config`|Path to the Equinix Metal provider configuration|Requires the Equinix Metal CCM| +## Kube-Vip flag reference + +| Category | Flag | Usage | Notes | +| ------------------ | ---------------------- | ------------------------------------------------------------------ | ------------------------------------------------------------------------------- | +| **Mode** | | | | +| | `--controlPlane` | Enables `kube-vip` control-plane functionality | | +| | `--services` | Enables 
`kube-vip` to watch services of type:LoadBalancer | | +| **Vip Config** | | | | +| | `--arp` | Enables ARP broadcasts from Leader | | +| | `--bgp` | Enables BGP peering from `kube-vip` | | +| | `--vip` | `` | (deprecated) | +| | `--address` | `` or `` | | +| | `--interface` | `` | | +| | `--leaderElection` | Enables Kubernetes LeaderElection | Used by ARP, as only the leader can broadcast | +| **Services** | | | | +| | `--cidr` | Defaults "32" | Used when advertising BGP addresses (typically as `x.x.x.x/32`) | +| **Kubernetes** | | | | +| | `--inCluster` | Defaults to looking inside the Pod for the token | | +| | `--taint` | Enables a taint, stopping control plane daemonset being on workers | | +| **LeaderElection** | | | | +| | `--leaseDuration` | default 5 | Seconds a lease is held for | +| | `--leaseRenewDuration` | default 3 | Seconds a leader can attempt to renew the lease | +| | `--leaseRetry` | default 1 | Number of times the leader will hold the lease for | +| | `--namespace` | "kube-vip" | The namespace where the lease will reside | +| **BGP** | | | | +| | `--bgpRouterID` | `` | Typically the address of the local node | +| | `--localAS` | default 65000 | The AS we peer from | +| | `--bgppeers` | `` | Comma separated list of BGP peers | +| | `--peerAddress` | `` | Address of a single BGP Peer | +| | `--peerAS` | default 65000 | AS of a single BGP Peer | +| | `--peerPass` | "" | Password to work with a single BGP Peer | +| | `--multiHop` | Enables eBGP MultiHop | Enable multiHop with a single BGP Peer | +| | `--annotations` | `` | Startup will be paused until the node annotations contain the BGP configuration | +| **Equinix Metal** | | | (May be deprecated) | +| | `--metal` | Enables Equinix Metal API calls | | +| | `--metalKey` | Equinix Metal API token | | +| | `--metalProject` | Equinix Metal Project (Name) | | +| | `--metalProjectID` | Equinix Metal Project (UUID) | | +| | `--provider-config` | Path to the Equinix Metal provider configuration | Requires the Equinix Metal CCM | ## Changelog @@ -112,7 +113,7 @@ Once DHCP returns an IP for the FQDN, the same `dnsUpdater` runs to periodically ## BGP Support (added in 0.1.8) -In version `0.1.8` `kube-vip` was updated to support [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol) as a VIP failover mechanism. When a node is elected as a leader then it will update it's peers so that they are aware to route traffic to that node in order to access the VIP. +In version `0.1.8` `kube-vip` was updated to support [BGP](https://en.wikipedia.org/wiki/Border_Gateway_Protocol) as a VIP failover mechanism. When a node is elected as a leader then it will update it's peers so that they are aware to route traffic to that node in order to access the VIP. The following new flags are used: @@ -128,7 +129,7 @@ If the `--bgp` flag is passed along with the Equinix Metal flags `metal, metalKe ## Equinix Metal Control Plane Support (added in 0.1.8) -Recently in version `0.1.7` of `kube-vip` we added the functionality to use a Equinix Metal Elastic IP as the virtual IP fronting the Kubernetes Control plane cluster. In order to first get out virtual IP we will need to use our Equinix Metal account and create a EIP (either public or private). We will only need a single address so a `/32` will suffice, once this is created as part of a Equinix Metal project we can now apply this address to the servers that live in the same project. 
+Recently in version `0.1.7` of `kube-vip` we added the functionality to use an Equinix Metal Elastic IP as the virtual IP fronting the Kubernetes control plane cluster. In order to first get our virtual IP we will need to use our Equinix Metal account and create an EIP (either public or private). We will only need a single address, so a `/32` will suffice. Once this is created as part of an Equinix Metal project, we can apply this address to the servers that live in the same project.
In this example we've logged into the UI can created a new EIP of `147.75.1.2`, and we've deployed three small server instances with Ubuntu.
diff --git a/docs/hybrid/services/index.md b/docs/hybrid/services/index.md
index 56a0665a..ee3ebc92 100644
--- a/docs/hybrid/services/index.md
+++ b/docs/hybrid/services/index.md
@@ -1,4 +1,4 @@
-# Kube-vip services 
+# Kube-vip services
We've designed `kube-vip` to be as de-coupled or agnostic from other components that may exist within a Kubernetes cluster as possible. This has lead to `kube-vip` having a very simplistic but robust approach to advertising Kubernetes services to the outside world and marking these services as ready to use.
@@ -7,17 +7,17 @@ We've designed `kube-vip` to be as de-coupled or agnostic from other components
This section details the flow of events in order for `kube-vip` to advertise a Kubernetes service:
1. An end user exposes a application through Kubernetes as a LoadBalancer => `kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx`
-2. Within the Kubernetes cluster a service object is created with the `svc.Spec.Type = LoadBalancer`
+2. Within the Kubernetes cluster a service object is created with the `spec.Type = LoadBalancer`
3. A controller (typically a Cloud Controller) has a loop that "watches" for services of the type `LoadBalancer`.
4. The controller now has the responsibility of providing an IP address for this service along with doing anything that is network specific for the environment where the cluster is running.
-5. Once the controller has an IP address it will update the service `svc.Spec.LoadBalancerIP` with it's new IP address.
-6. The `kube-vip` pods also implement a "watcher" for services that have a `svc.Spec.LoadBalancerIP` address attached.
+5. Once the controller has an IP address, it will update the Service fields `metadata.annotations["kube-vip.io/loadbalancerIPs"]` and `spec.loadBalancerIP` with the IP address. `spec.loadBalancerIP` is deprecated as of Kubernetes 1.24 and will not be updated in future releases.
+6. `kube-vip` Pods implement a "watcher" for Services that have a `metadata.annotations["kube-vip.io/loadbalancerIPs"]` address attached. If the annotation is not present, `kube-vip` will fall back to checking `spec.loadBalancerIP`.
7. When a new service appears `kube-vip` will start advertising this address to the wider network (through BGP/ARP) which will allow traffic to come into the cluster and hit the service network.
-8. Finally `kube-vip` will update the service status so that the API reflects that this LoadBalancer is ready. This is done by updating the `svc.Status.LoadBalancer.Ingress` with the VIP address.
+8. Finally `kube-vip` will update the service status so that the API reflects that this LoadBalancer is ready. This is done by updating the `status.loadBalancer.ingress` with the VIP address.
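+
+To observe this flow end to end from the client side (a minimal sketch, assuming the `nginx-deployment` from step 1 exists and that a controller such as the kube-vip cloud provider described below is installed to assign the address):
+
+```sh
+# Expose the Deployment, then watch the Service EXTERNAL-IP move from <pending> to the VIP
+kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx
+kubectl get service nginx --watch
+```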
## CCM -We can see from the [flow](#Flow) above that `kube-vip` isn't coupled to anything other than the Kubernetes API, and will only act upon an existing Kubernetes primative (in this case the object of type `Service`). This makes it easy for existing CCMs to simply apply their logic to services of type LoadBalancer and leave `kube-vip` to take the next steps to advertise these load-balancers to the outside world. +We can see from the [flow](#Flow) above that `kube-vip` isn't coupled to anything other than the Kubernetes API, and will only act upon an existing Kubernetes primative (in this case the object of type `Service`). This makes it easy for existing CCMs to simply apply their logic to services of type LoadBalancer and leave `kube-vip` to take the next steps to advertise these load-balancers to the outside world. ## Using the Kube-vip Cloud Provider @@ -48,7 +48,7 @@ or kubectl create configmap --namespace kube-system kubevip --from-literal range-global=192.168.1.220-192.168.1.230 ``` -Creating services of `type: LoadBalancer` in *any namespace* will now take addresses from the **global** cidr defined in the `configmap` unless a specific +Creating services of `type: LoadBalancer` in *any namespace* will now take addresses from the **global** cidr defined in the `configmap` unless a specific ## The Detailed guide @@ -61,7 +61,7 @@ Creating services of `type: LoadBalancer` in *any namespace* will now take addre $ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml ``` -The following output should appear when the manifest is applied: +The following output should appear when the manifest is applied: ``` serviceaccount/kube-vip-cloud-controller created @@ -161,9 +161,9 @@ spec: ### Using DHCP for Load Balancers (experimental) With the latest release of `kube-vip` > 0.2.1, it is possible to use the local network DHCP server to provide `kube-vip` with a load-balancer address that can be used to access a - Kubernetes service on the network. + Kubernetes service on the network. -In order to do this we need to signify to `kube-vip` and the cloud-provider that we don't need one of their managed addresses. We do this by explicitly exposing a service on the +In order to do this we need to signify to `kube-vip` and the cloud-provider that we don't need one of their managed addresses. We do this by explicitly exposing a service on the address `0.0.0.0`. When `kube-vip` sees a service on this address it will create a `macvlan` interface on the host and request a DHCP address, once this address is provided it will assign it as the VIP and update the Kubernetes service! ``` @@ -194,7 +194,7 @@ Using UPNP we can create a matching port on the `` all #### Enable UPNP -Add the following to the `kube-vip` `env:` section, and the rest should be completely automated. +Add the following to the `kube-vip` `env:` section, and the rest should be completely automated. **Note** some environments may require (Unifi) will require `Secure mode` being `disabled` (this allows a host with a different address to register a port) @@ -226,7 +226,7 @@ $ curl externalIP:32380 Either through the CLI or through the UI, create a public IPv4 EIP address.. and this is the address you can expose through BGP! 
``` -# packet ip request -p xxx-bbb-ccc -f ams1 -q 1 -t public_ipv4 +# packet ip request -p xxx-bbb-ccc -f ams1 -q 1 -t public_ipv4 +-------+---------------+--------+----------------------+ | ID | ADDRESS | PUBLIC | CREATED | +-------+---------------+--------+----------------------+ @@ -238,7 +238,7 @@ kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name= ## Equinix Metal Overview (using the [Equinix Metal CCM](https://github.com/packethost/packet-ccm)) -Below are two examples for running `type:LoadBalancer` services on worker nodes only and will create a daemonset that will run `kube-vip`. +Below are two examples for running `type:LoadBalancer` services on worker nodes only and will create a daemonset that will run `kube-vip`. **NOTE** This use-case requires the [Equinix Metal CCM](https://github.com/packethost/packet-ccm) to be installed and that the cluster/kubelet is configured to use an "external" cloud provider. @@ -255,7 +255,7 @@ kube-vip manifest daemonset \ --inCluster | k apply -f - ``` -### Using the existing CCM secret +### Using the existing CCM secret Alternatively it is possible to create a daemonset that will use the existing CCM secret to do an API lookup, this will allow for discovering the networking configuration needed to advertise loadbalancer addresses through BGP. diff --git a/docs/hybrid/static/index.md b/docs/hybrid/static/index.md index d07f3356..5e193ece 100644 --- a/docs/hybrid/static/index.md +++ b/docs/hybrid/static/index.md @@ -1,8 +1,8 @@ # Kube-vip as a Static Pod -In Hybrid mode `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. +In Hybrid mode `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `service.metadata.annotations["kube-vip.io/loadbalancerIPs"]` or `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. -The "hybrid" mode is now the default mode in `kube-vip` from `0.2.3` onwards, and allows both modes to be enabled at the same time. +The "hybrid" mode is now the default mode in `kube-vip` from `0.2.3` onwards, and allows both modes to be enabled at the same time. ## Generating a Manifest @@ -43,7 +43,7 @@ kube-vip manifest pod \ ### BGP -This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses. +This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses. **Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma seperate list in the format of `address:AS:password:multihop`. @@ -85,7 +85,7 @@ kube-vip manifest pod \ #### Creating a manifest using the metadata -We can parse the metadata, *however* it requires that the tools `curl` and `jq` are installed. +We can parse the metadata, *however* it requires that the tools `curl` and `jq` are installed. 
``` kube-vip manifest pod \ @@ -117,7 +117,7 @@ Due to an oddity with `kubeadm` we can't have our `kube-vip` manifest present ** ``` sudo kubeadm join $VIP:6443 \ - --token w5atsr.blahblahblah + --token w5atsr.blahblahblah --control-plane \ --certificate-key abc123 ``` @@ -126,4 +126,4 @@ sudo kubeadm join $VIP:6443 \ ## Services -At this point your `kube-vip` static pods will be up and running and where used with the `--services` flag will also be watching for Kubernetes services that they can advertise. In order for `kube-vip` to advertise a service it needs a CCM or other controller to apply an IP address to the `spec.LoadBalancerIP`, which marks the loadbalancer as defined. +At this point your `kube-vip` static pods will be up and running and where used with the `--services` flag will also be watching for Kubernetes services that they can advertise. In order for `kube-vip` to advertise a service it needs a CCM or other controller to apply an IP address to the `spec.LoadBalancerIP`, which marks the loadbalancer as defined. diff --git a/docs/index.md b/docs/index.md index afc5c016..92674069 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,65 +1,53 @@ ![kube-vip.png](kube-vip.png) ## Overview -Kubernetes Virtual IP and Load-Balancer for both control plane and Kubernetes services -The idea behind `kube-vip` is a small self-contained Highly-Available option for all environments, especially: +Kube-Vip provides Kubernetes clusters a virtual IP and load balancer for both control plane and Kubernetes Services. -- Bare-Metal -- On-Prem -- Edge (ARM / Raspberry PI) +The idea behind `kube-vip` is a small, self-contained, highly-available option for all environments, especially: + +- Bare metal +- On-Premises +- Edge (ARM / Raspberry Pi) - Virtualisation - Pretty much anywhere else :) ## Features -Kube-Vip was originally created to provide a HA solution for the Kubernetes control plane, over time it has evolved to incorporate that same functionality into Kubernetes service type [load-balancers](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). +Kube-Vip was originally created to provide a HA solution for the Kubernetes control plane, but over time it has evolved to incorporate that same functionality for Kubernetes Services of type [LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). Some of the features include: -- VIP addresses can be both IPv4 or IPv6 +- VIP addresses can be either IPv4 or IPv6 - Control Plane with ARP (Layer 2) or BGP (Layer 3) - Control Plane using either [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection) or [raft](https://en.wikipedia.org/wiki/Raft_(computer_science)) - Control Plane HA with kubeadm (static Pods) -- Control Plane HA with K3s/and others (daemonsets) -- Control Plane LoadBalancing with IPVS (kube-vip > 0.4) +- Control Plane HA with K3s/and others (DaemonSets) +- Control Plane LoadBalancing with IPVS (kube-vip β‰₯ 0.4) - Service LoadBalancer using [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection) for ARP (Layer 2) - Service LoadBalancer using multiple nodes with BGP - Service LoadBalancer address pools per namespace or global - Service LoadBalancer address via (existing network DHCP) -- Service LoadBalancer address exposure to gateway via UPNP -- ... manifest generation, vendor API integrations and many nore... +- Service LoadBalancer address exposure to gateway via UPnP +- ... manifest generation, vendor API integrations and many more... 
## Why? -The "original" purpose of `kube-vip` was to simplify the building of HA Kubernetes clusters, which at this time can involve a few components and configurations that all need to be managed. This was blogged about in detail by [thebsdbox](https://twitter.com/thebsdbox/) here -> [https://thebsdbox.co.uk/2020/01/02/Designing-Building-HA-bare-metal-Kubernetes-cluster/#Networking-load-balancing](https://thebsdbox.co.uk/2020/01/02/Designing-Building-HA-bare-metal-Kubernetes-cluster/#Networking-load-balancing). As the project evolved it now can use those same technologies to provide load-balancing capabilities within a Kubernetes Cluster. - +The "original" purpose of `kube-vip` was to simplify the building of HA Kubernetes clusters, which at the time involved a few components and configurations that all needed to be managed. This was blogged about in detail by [thebsdbox](https://twitter.com/thebsdbox/) [here](https://thebsdbox.co.uk/2020/01/02/Designing-Building-HA-bare-metal-Kubernetes-cluster/#Networking-load-balancing). Since the project has evolved, it can now use those same technologies to provide load balancing capabilities within a Kubernetes Cluster. ## Architecture -The architecture for `kube-vip` (and associated kubernetes components) is covered in detail [here](/architecture/) +The architecture for `kube-vip` (and associated Kubernetes components) is covered in detail [here](/architecture/). ## Installation -There are two main routes for deploying `kube-vip`, either through a [static pod](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/) when bringing up a Kubernetes cluster with [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) or as a [daemon set](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) (typically with distributions like [k3s](https://k3s.io)). - -The infrastructure for our example HA Kubernetes cluster is as follows: - -| Node | Address | -|----------------|------------| -| VIP | 10.0.0.40 | -| controlPlane01 | 10.0.0.41 | -| controlPlane02 | 10.0.0.42 | -| controlPlane03 | 10.0.0.43 | -| worker01 | 10.0.0.44 | - -All nodes are running Ubuntu 20.04, Docker CE and will use Kubernetes 1.21.0, we only have one worker as we're going to use our controlPlanes in "hybrid" mode. +There are two main routes for deploying `kube-vip`: either through a [static Pod](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/) when bringing up a Kubernetes cluster with [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) or as a [DaemonSet](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) (typically with distributions like [K3s](https://k3s.io)). 
- [Static Pod](/install_static) -- [Daemon Set](/install_daemonset) +- [DaemonSet](/install_daemonset) ## Usage - [On-Prem with the kube-vip cloud controller](/usage/on-prem) -- [KIND](/usage/kind) +- [KinD](/usage/kind) - [Equinix Metal](/usage/EquinixMetal) - [k3s](/usage/k3s) @@ -69,12 +57,13 @@ All nodes are running Ubuntu 20.04, Docker CE and will use Kubernetes 1.21.0, we ## Links -- The Kube-Vip Cloud Provider Repository -> [https://github.com/kube-vip/kube-vip-cloud-provider](https://github.com/kube-vip/kube-vip-cloud-provider) -- The Kube-Vip Repository -> [https://github.com/kube-vip/kube-vip](https://github.com/kube-vip/kube-vip) -- The Kube-Vip RBAC (required for the daemonset) -> [https://kube-vip.io/manifests/rbac.yaml](https://kube-vip.io/manifests/rbac.yaml) +- [Kube-Vip Cloud Provider Repository](https://github.com/kube-vip/kube-vip-cloud-provider) +- [Kube-Vip Repository](https://github.com/kube-vip/kube-vip) +- [Kube-Vip RBAC manifest (required for the DaemonSet)](https://kube-vip.io/manifests/rbac.yaml) + ## Copyright -Β© 2021 [The Linux Foundation](https://www.linuxfoundation.org/). All right reserved +Β© 2021 [The Linux Foundation](https://www.linuxfoundation.org/). All rights reserved. The Linux Foundation has registered trademarks and uses trademarks. diff --git a/docs/install_daemonset/index.md b/docs/install_daemonset/index.md index cb83ae07..95687ef4 100644 --- a/docs/install_daemonset/index.md +++ b/docs/install_daemonset/index.md @@ -1,24 +1,20 @@ -# Kube-Vip as a daemonset +# Kube-Vip as a DaemonSet -## Daemonset +## DaemonSet -Other Kubernetes distributions can bring up a Kubernetes cluster, without depending on a VIP (BUT they are configured to support one). A prime example of this would be k3s, that can be configured to start and also sign the certificates to allow incoming traffic to a virtual IP. Given we don't need the VIP to exist **before** the cluster, we can bring up the k3s node(s) and then add `kube-vip` as a daemonset for all control plane nodes. +Some Kubernetes distributions can bring up a Kubernetes cluster without depending on a pre-existing VIP (but they may be configured to support one). A prime example of this would be K3s which can be configured to start and also sign the certificates to allow incoming traffic to a virtual IP. Given we don't need the VIP to exist before the cluster, we can bring up the K3s node(s) and then add `kube-vip` as a DaemonSet for all control plane nodes. -If the Kubernetes installer allows for adding a Virtual IP as an additional [SAN](https://en.wikipedia.org/wiki/Subject_Alternative_Name) to the API server certificate then we can apply `kube-vip` to the cluster once the first node has been brought up. +If the Kubernetes installer allows for adding a virtual IP as an additional [SAN](https://en.wikipedia.org/wiki/Subject_Alternative_Name) to the API server certificate, we can apply `kube-vip` to the cluster once the first node has been brought up. -## Kube-Vip as **HA**, **Load-Balancer** or both ` Β―\_(ツ)_/Β―` +Unlike running `kube-vip` as a [static Pod](/install_static) there are a few more things that may need configuring when running `kube-vip` as a DaemonSet. This page will cover primarily the differences. -When generating a manifest for `kube-vip` we will pass in the flags `--controlplane` / `--services` these will enable the various types of functionality within `kube-vip`. 
+## Kube-Vip as HA, Load Balancer, or both -With both enabled `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. - -**Note about Daemonsets** - -Unlike generating the static manifest there are a few more things that may need configuring, this page will cover most scenarios. +The functionality of `kube-vip` depends on the flags used to create the DaemonSet manifest. By passing in `--controlplane` we instruct `kube-vip` to provide and advertise a virtual IP to be used by the control plane. By passing in `--services` we tell `kube-vip` to provide load balancing for Kubernetes Service resources created inside the cluster. With both enabled, `kube-vip` will manage a virtual IP address that is passed through its configuration for a highly available Kubernetes cluster. It will also watch Services of type `LoadBalancer` and once their `service.metadata.annotations["kube-vip.io/loadbalancerIPs"]` or `spec.LoadBalancerIP` is updated (typically by a cloud controller, including (optionally) the one provided by kube-vip in [on-prem](/usage/on-prem) scenarios) it will advertise this address using BGP/ARP. In this example, we will use both when generating the manifest. ## Create the RBAC settings -As a daemonSet runs within the Kubernetes cluster it needs the correct access to be able to watch Kubernetes services and other objects. In order to do this we create a User, Role, and a binding.. we can apply this with the command: +Since `kube-vip` as a DaemonSet runs as a regular resource instead of a static Pod, it still needs the correct access to be able to watch Kubernetes Services and other objects. In order to do this, RBAC resources must be created (a ServiceAccount, ClusterRole, and ClusterRoleBinding), which can be applied with the following command: ``` kubectl apply -f https://kube-vip.io/manifests/rbac.yaml
-####Β Get latest version +### ARP Example for DaemonSet - We can parse the GitHub API to find the latest version (or we can set this manually) +When creating the `kube-vip` installation manifest as a DaemonSet, the `manifest` subcommand takes the value `daemonset` as opposed to the `pod` value. The flags `--inCluster` and `--taint` are also needed to configure the DaemonSet to use a ServiceAccount and affine the `kube-vip` Pods to control plane nodes thereby preventing them from running on worker instances. -`KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")` - -or manually: - -`export KVVERSION=vx.x.x` - -The easiest method to generate a manifest is using the container itself, below will create an alias for different container runtimes. - -### containerd -`alias kube-vip="ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$KVVERSION vip /kube-vip"` - -### Docker -`alias kube-vip="docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION"` +``` +kube-vip manifest daemonset \ + --interface $INTERFACE \ + --address $VIP \ + --inCluster \ + --taint \ + --controlplane \ + --services \ + --arp \ + --leaderElection +``` -### BGP Example +#### Example ARP Manifest -This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses. +```yaml +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: ens160 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: address + value: 192.168.0.40 + image: ghcr.io/kube-vip/kube-vip:v0.4.0 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_TIME + hostNetwork: true + serviceAccountName: kube-vip + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 +``` -**Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma seperate list in the format of `address:AS:password:multihop`. +### BGP Example for DaemonSet -**Note 2** we pass the `--inCluster` flag as this is running as a daemonSet within the Kubernetes cluster and therefore will have access to the token inside the running pod. +This configuration will create a manifest that starts `kube-vip` providing control plane VIP and Kubernetes Service management. 
Unlike ARP, all nodes in the BGP configuration will advertise virtual IP addresses. -**Note 3** we pass the `--taint` flag as we're deploying `kube-vip` as both a daemonset and as advertising controlplane, we want to taint this daemonset to only run on the worker nodes. +**Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma-separated list in the format of `address:AS:password:multihop`. `export INTERFACE=lo` ``` kube-vip manifest daemonset \ --interface $INTERFACE \ - --vip $VIP \ - --controlplane \ - --services \ + --address $VIP \ --inCluster \ --taint \ + --controlplane \ + --services \ --bgp \ + --localAS 65000 \ + --bgpRouterID 192.168.0.2 \ --bgppeers 192.168.0.10:65000::false,192.168.0.11:65000::false + ``` -### Generated Manifest +#### Example BGP Manifest -``` +```yaml apiVersion: apps/v1 kind: DaemonSet metadata: @@ -118,7 +185,7 @@ spec: - name: port value: "6443" - name: vip_interface - value: ens192 + value: ens160 - name: vip_cidr value: "32" - name: cp_enable @@ -132,6 +199,7 @@ spec: - name: bgp_enable value: "true" - name: bgp_routerid + value: 192.168.0.2 - name: bgp_as value: "65000" - name: bgp_peeraddress @@ -140,9 +208,9 @@ spec: value: "65000" - name: bgp_peers value: 192.168.0.10:65000::false,192.168.0.11:65000::false - - name: vip_address + - name: address value: 192.168.0.40 - image: ghcr.io/kube-vip/kube-vip:v0.3.9 + image: ghcr.io/kube-vip/kube-vip:v0.4.0 imagePullPolicy: Always name: kube-vip resources: {} @@ -167,20 +235,19 @@ status: numberReady: 0 ``` -### Managing a `routerID` as a daemonset +#### Managing a `routerID` as a DaemonSet -The routerID needs to be unique on each node that participates in BGP advertisements. In order to do this we can modify the manifest so that when `kube-vip` starts it will look up its local address and use that as the routerID. +The `routerID` needs to be unique on each node that participates in BGP advertisements. In order to do this, we can modify the manifest so that when `kube-vip` starts it will look up its local address and use that as the `routerID`. Add the following to the `env[]` array of the container: -``` - - name: bgp_routerinterface - value: "ens160" +```yaml +- name: bgp_routerinterface + value: "ens160" ``` -This will instruct each instance of `kube-vip` as part of the daemonset to look up the IP address on that interface and use it as the routerID. +### DaemonSet Manifest Overview -### Manifest Overview +Once the manifest for `kube-vip` as a DaemonSet is generated, these are some of the notable differences over the [static Pod](/install_static) manifest and their significance. -- `nodeSelector` - Ensures that this particular daemonset only runs on control plane nodes -- `serviceAccountName: kube-vip` - this specifies the user in the `rbac` that will give us the permissions to get/update services. -- `hostNetwork: true` - This pod will need to modify interfaces (for VIPs) -- `env {...}` - We pass the configuration into the kube-vip pod through environment variables. +- `nodeSelector`: Ensures that DaemonSet Pods only run on control plane nodes. +- `serviceAccountName: kube-vip`: Specifies the ServiceAccount name that will be used to get/update Kubernetes Service resources. +- `tolerations`: Allows scheduling to control plane nodes that normally specify `NoSchedule` or `NoExecute` taints. 
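Once the DaemonSet manifest has been applied, it can be useful to confirm that the Pods have been scheduled onto the control plane nodes and are advertising the VIP. The commands below are a quick sketch of such a check and assume the generated resource kept the default name `kube-vip-ds` in the `kube-system` Namespace, as in the examples above:

```
# Confirm a kube-vip Pod is running on each control plane node
kubectl -n kube-system get pods -l name=kube-vip-ds -o wide

# Follow the DaemonSet logs to watch leader election and VIP advertisement
kubectl -n kube-system logs ds/kube-vip-ds --tail=20
```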
diff --git a/docs/install_static/index.md b/docs/install_static/index.md index 0670fd17..a5a97b7a 100644 --- a/docs/install_static/index.md +++ b/docs/install_static/index.md @@ -1,69 +1,110 @@ -# Kube-vip as a Static Pod +# Kube-Vip as a Static Pod ## Static Pods -Static pods are a Kubernetes pod that is ran by the `kubelet` on a single node, and is **not** managed by the Kubernetes cluster itself. This means that whilst the pod can appear within Kubernetes it can't make use of a variety of kubernetes functionality (such as the kubernetes token or `configMaps`). The static pod approach is primarily required for [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/), this is due to the sequence of actions performed by `kubeadm`. Ideally we want `kube-vip` to be part of the kubernetes cluster, for various bits of functionality we also need `kube-vip` to provide a HA virtual IP as part of the installation. +[Static Pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/) are Kubernetes Pods that are run by the `kubelet` on a single node and are not managed by the Kubernetes cluster itself. This means that whilst the Pod can appear within Kubernetes, it can't make use of a variety of Kubernetes functionality (such as the Kubernetes token or ConfigMap resources). The static Pod approach is primarily required for [kubeadm](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/create-cluster-kubeadm/) due to the sequence of actions performed by `kubeadm`. Ideally, we want `kube-vip` to be part of the Kubernetes cluster, but for various bits of functionality we also need `kube-vip` to provide a HA virtual IP as part of the installation. + +### with kubeadm +The sequence of events for building a highly available Kubernetes cluster with `kubeadm` and `kube-vip` is as follows: + +1. Generate a `kube-vip` manifest in the static Pods manifest directory (see the [generating a manifest](#generating-a-manifest) section below). +2. Run `kubeadm init` with the `--control-plane-endpoint` flag using the VIP address provided when generating the static Pod manifest. +3. The `kubelet` will parse and execute all manifests, including the `kube-vip` manifest generated in step one and the other control plane components including `kube-apiserver`. +4. `kube-vip` starts and advertises the VIP address. +5. The `kubelet` on this first control plane will connect to the VIP advertised in the previous step. +6. `kubeadm init` finishes successfully on the first control plane. +7. Using the output from the `kubeadm init` command on the first control plane, run the `kubeadm join` command on the remainder of the control planes. +8. Copy the generated `kube-vip` manifest to the remainder of the control planes and place it in their static Pods manifest directory (default of `/etc/kubernetes/manifests/`). + +### with k0sctl +The sequence of events for building a highly available Kubernetes cluster with `k0sctl` and `kube-vip` is as follows: + + +1. Generate a `kube-vip` manifest in the static Pods manifest directory (see the [generating a manifest](#generating-a-manifest) section below). +2. Run `k0sctl init > k0sctl.yaml`, then edit the manifest to add your host IPs. Controllers need to have both roles by using `role: controller+worker`. +3. Add the `installFlags` options `--kubelet-extra-args=--pod-manifest-path=/etc/k0s/manifests/` and `--disable-components=konnectivity-server`. +4. 
On the first controller in the list, add a `hooks` option like the one below and replace the values: + ```yaml + hooks: + apply: + before: + - /usr/sbin/ip addr add ${VIP} dev ${INTERFACE} || exit 0 + after: + - /usr/sbin/ip addr del ${VIP} dev ${INTERFACE} || exit 0 + ``` +5. On every controller in the list, configure the `files` option to upload the `kube-vip` manifest into `/etc/k0s/manifests/` + ```yaml + files: + - name: kube-vip.yaml + src: kube-vip.yaml + dstDir: /etc/k0s/manifests/ + perm: 0655 + ``` +6. Configure the `k0s.config.spec.api.externalAddress` in the `k0s` config section in `k0sctl.yaml` +7. Deploy your cluster with `k0sctl apply` + + +## Kube-Vip as HA, Load Balancer, or both + +The functionality of `kube-vip` depends on the flags used to create the static Pod manifest. By passing in `--controlplane` we instruct `kube-vip` to provide and advertise a virtual IP to be used by the control plane. By passing in `--services` we tell `kube-vip` to provide load balancing for Kubernetes Service resources created inside the cluster. With both enabled, `kube-vip` will manage a virtual IP address that is passed through its configuration for a highly available Kubernetes cluster. It will also watch Services of type `LoadBalancer` and once their `service.metadata.annotations["kube-vip.io/loadbalancerIPs"]` or `spec.LoadBalancerIP` is updated (typically by a cloud controller, including (optionally) the one provided by kube-vip in [on-prem](/usage/on-prem) scenarios) it will advertise this address using BGP/ARP. In this example, we will use both when generating the manifest. -The sequence of events for this to work follows: -1. Generate a `kube-vip` manifest in the static pods manifest folder -2. Run `kubeadm init`, this generates the manifests for the control plane and wait to connect to the VIP -3. The `kubelet` will parse and execute all manifest, including the `kube-vip` manifest -4. `kube-vip` starts and advertises our VIP -5. The `kubeadm init` finishes succesfully. +## Generating a Manifest -## Kube-Vip as **HA**, **Load-Balancer** or both ` Β―\_(ツ)_/Β―` +In order to create an easier experience of consuming the various functionality within `kube-vip`, we can use the `kube-vip` container itself to generate our static Pod manifest. We do this by running the `kube-vip` image as a container and passing in the various [flags](/flags/) for the capabilities we want to enable. -When generating a manifest for `kube-vip` we will pass in the flags `--controlplane` / `--services` these will enable the various types of functionality within `kube-vip`. +### Set configuration details -With both enabled `kube-vip` will manage a virtual IP address that is passed through it's configuration for a Highly Available Kubernetes cluster, it will also "watch" services of `type:LoadBalancer` and once their `spec.LoadBalancerIP` is updated (typically by a cloud controller) it will advertise this address using BGP/ARP. ## Generating a Manifest +We use environment variables to predefine the values of the inputs to supply to `kube-vip`. -This section details creating a number of manifests for various use cases +Set the `VIP` address to be used for the control plane: ### Set configuration details +`export VIP=192.168.0.40` -`export VIP=192.168.0.40` +Set `INTERFACE` to the name of the interface on the control plane(s) which will announce the VIP. In many Linux distributions this can be found with the `ip a` command.
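If you are unsure which interface to use, one way to check (a sketch, assuming a standard `iproute2` installation) is to look up the interface that carries the default route:

```
# Print the interface used to reach an external address (a candidate value for INTERFACE)
ip route get 1.1.1.1 | awk '{for (i = 1; i < NF; i++) if ($i == "dev") print $(i + 1)}'
```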
-`export VIP=192.168.0.40` +`export INTERFACE=ens160` -`export INTERFACE=` +Get the latest version of the `kube-vip` release by parsing the GitHub API. This step requires that `jq` and `curl` are installed. -## Configure to use a container runtime +`KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")` -###Β Get latest version +To set manually instead, find the desired [release tag](https://github.com/kube-vip/kube-vip/releases): - We can parse the GitHub API to find the latest version (or we can set this manually) +`export KVVERSION=v0.4.0` -`KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")` +### Creating the manifest -or manually: +With the input values now set, we can pull and run the `kube-vip` image supplying it the desired flags and values. Once the static Pod manifest is generated for your desired method (ARP or BGP), if running multiple control plane nodes, ensure it is placed in each control plane's static manifest directory (by default, `/etc/kubernetes/manifests`). -`export KVVERSION=vx.x.x` +Depending on the container runtime, use one of the two aliased commands to create a `kube-vip` command which runs the `kube-vip` image as a container. -The easiest method to generate a manifest is using the container itself, below will create an alias for different container runtimes. +For containerd, run the below command: -### containerd `alias kube-vip="ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$KVVERSION vip /kube-vip"` -### Docker +For Docker, run the below command: + `alias kube-vip="docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION"` -## ARP +### ARP -This configuration will create a manifest that starts `kube-vip` providing **controlplane** and **services** management, using **leaderElection**. When this instance is elected as the leader it will bind the `vip` to the specified `interface`, this is also the same for services of `type:LoadBalancer`. +With the inputs and alias command set, we can run the `kube-vip` container to generate a static Pod manifest which will be directed to a file at `/etc/kubernetes/manifests/kube-vip.yaml`. As such, this is assumed to run on the first control plane node. -`export INTERFACE=eth0` +This configuration will create a manifest that starts `kube-vip` providing control plane VIP and Kubernetes Service management using the `leaderElection` method and ARP. When this instance is elected as the leader, it will bind the `vip` to the specified `interface`. This is the same behavior for Services of type `LoadBalancer`. + +> Note: When running these commands on a to-be control plane node, `sudo` access may be required along with pre-creation of the `/etc/kubernetes/manifests/` directory. 
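As a sketch of that preparation step (assuming kubeadm has not yet created the directory on this node):

```
# Create the static Pod manifest directory if it does not exist yet
sudo mkdir -p /etc/kubernetes/manifests
```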
``` kube-vip manifest pod \ --interface $INTERFACE \ - --vip $VIP \ + --address $VIP \ --controlplane \ --services \ --arp \ - --leaderElection | tee /etc/kubernetes/manifests/kube-vip.yaml + --leaderElection | tee /etc/kubernetes/manifests/kube-vip.yaml ``` -### Example manifest +#### Example ARP Manifest ``` apiVersion: v1 @@ -101,9 +142,9 @@ spec: value: "3" - name: vip_retryperiod value: "1" - - name: vip_address + - name: address value: 192.168.0.40 - image: ghcr.io/kube-vip/kube-vip:v0.3.9 + image: ghcr.io/kube-vip/kube-vip:v0.4.0 imagePullPolicy: Always name: kube-vip resources: {} @@ -128,27 +169,27 @@ spec: status: {} ``` -## BGP +### BGP -This configuration will create a manifest that will start `kube-vip` providing **controlplane** and **services** management. **Unlike** ARP, all nodes in the BGP configuration will advertise virtual IP addresses. +This configuration will create a manifest that starts `kube-vip` providing control plane VIP and Kubernetes Service management. Unlike ARP, all nodes in the BGP configuration will advertise virtual IP addresses. -**Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma seperate list in the format of `address:AS:password:multihop`. +**Note** we bind the address to `lo` as we don't want multiple devices that have the same address on public interfaces. We can specify all the peers in a comma-separated list in the format of `address:AS:password:multihop`. `export INTERFACE=lo` ``` kube-vip manifest pod \ --interface $INTERFACE \ - --vip $VIP \ + --address $VIP \ --controlplane \ --services \ --bgp \ --localAS 65000 \ --bgpRouterID 192.168.0.2 \ - --bgppeers 192.168.0.10:65000::false,192.168.0.11:65000::false | tee /etc/kubernetes/manifests/kube-vip.yaml + --bgppeers 192.168.0.10:65000::false,192.168.0.11:65000::false | tee /etc/kubernetes/manifests/kube-vip.yaml ``` -### Example Manifest +#### Example BGP Manifest ``` apiVersion: v1 @@ -188,7 +229,7 @@ spec: value: "65000" - name: bgp_peers value: 192.168.0.10:65000::false,192.168.0.11:65000::false - - name: vip_address + - name: address value: 192.168.0.40 image: ghcr.io/kube-vip/kube-vip:v0.3.9 imagePullPolicy: Always @@ -213,4 +254,4 @@ spec: path: /etc/kubernetes/admin.conf name: kubeconfig status: {} -``` \ No newline at end of file +``` diff --git a/docs/manifests/rbac.yaml b/docs/manifests/rbac.yaml index f35d79aa..0480d0ec 100644 --- a/docs/manifests/rbac.yaml +++ b/docs/manifests/rbac.yaml @@ -12,7 +12,7 @@ metadata: name: system:kube-vip-role rules: - apiGroups: [""] - resources: ["services", "services/status", "nodes"] + resources: ["services", "services/status", "nodes", "endpoints"] verbs: ["list","get","watch", "update"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] diff --git a/docs/manifests/v0.4.1/kube-vip-arp-ds-lb.yaml b/docs/manifests/v0.4.1/kube-vip-arp-ds-lb.yaml new file mode 100644 index 00000000..91e6c7c8 --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-arp-ds-lb.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: 
"true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: lb_enable + value: "true" + - name: lb_port + value: "6443" + - name: lb_fwdmethod + value: local + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + diff --git a/docs/manifests/v0.4.1/kube-vip-arp-ds.yaml b/docs/manifests/v0.4.1/kube-vip-arp-ds.yaml new file mode 100644 index 00000000..c37cf21a --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-arp-ds.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + diff --git a/docs/manifests/v0.4.1/kube-vip-arp-lb.yaml b/docs/manifests/v0.4.1/kube-vip-arp-lb.yaml new file mode 100644 index 00000000..81746ccf --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-arp-lb.yaml @@ -0,0 +1,66 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: lb_enable + value: "true" + - name: lb_port + value: "6443" + - name: lb_fwdmethod + value: local + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: 
kubeconfig +status: {} + diff --git a/docs/manifests/v0.4.1/kube-vip-arp.yaml b/docs/manifests/v0.4.1/kube-vip-arp.yaml new file mode 100644 index 00000000..f3fe9c68 --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-arp.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} + diff --git a/docs/manifests/v0.4.1/kube-vip-bgp-ds.yaml b/docs/manifests/v0.4.1/kube-vip-bgp-ds.yaml new file mode 100644 index 00000000..c8bebaf4 --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-bgp-ds.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "false" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: bgp_enable + value: "true" + - name: bgp_routerid + - name: bgp_as + value: "65000" + - name: bgp_peeraddress + - name: bgp_peerpass + - name: bgp_peeras + value: "65000" + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + hostNetwork: true + serviceAccountName: kube-vip + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + diff --git a/docs/manifests/v0.4.1/kube-vip-bgp-em-ds.yaml b/docs/manifests/v0.4.1/kube-vip-bgp-em-ds.yaml new file mode 100644 index 00000000..6b4eb5b1 --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-bgp-em-ds.yaml @@ -0,0 +1,83 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "false" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: 
"32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: provider_config + value: /etc/cloud-sa/cloud-sa.json + - name: bgp_enable + value: "true" + - name: bgp_routerid + - name: bgp_as + value: "65000" + - name: bgp_peeraddress + - name: bgp_peerpass + - name: bgp_peeras + value: "65000" + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/cloud-sa + name: cloud-sa-volume + readOnly: true + hostNetwork: true + serviceAccountName: kube-vip + volumes: + - name: cloud-sa-volume + secret: + secretName: metal-cloud-config + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 + diff --git a/docs/manifests/v0.4.1/kube-vip-bgp.yaml b/docs/manifests/v0.4.1/kube-vip-bgp.yaml new file mode 100644 index 00000000..03b22bf5 --- /dev/null +++ b/docs/manifests/v0.4.1/kube-vip-bgp.yaml @@ -0,0 +1,61 @@ +apiVersion: v1 +kind: Pod +metadata: + creationTimestamp: null + name: kube-vip + namespace: kube-system +spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "false" + - name: port + value: "6443" + - name: vip_interface + value: eth0 + - name: vip_cidr + value: "32" + - name: cp_enable + value: "true" + - name: cp_namespace + value: kube-system + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: bgp_enable + value: "true" + - name: bgp_routerid + - name: bgp_as + value: "65000" + - name: bgp_peeraddress + - name: bgp_peerpass + - name: bgp_peeras + value: "65000" + - name: vip_address + value: 192.168.0.1 + image: ghcr.io/kube-vip/kube-vip:v0.4.1 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/admin.conf + name: kubeconfig +status: {} + diff --git a/docs/usage/EquinixMetal/index.md b/docs/usage/EquinixMetal/index.md index eab30549..4263d1cc 100644 --- a/docs/usage/EquinixMetal/index.md +++ b/docs/usage/EquinixMetal/index.md @@ -8,7 +8,7 @@ When deploying Kubernetes with Equinix Metal with the `--controlplane` functiona ## Configure to use a container runtime -###Β Get latest version +### Get latest version We can parse the GitHub API to find the latest version (or we can set this manually) @@ -123,7 +123,23 @@ kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name= ## Troubleshooting -If `kube-vip` has been sat waiting for a long time then you may need to investigate that the annotations have been applied correctly by doing running the `describe` on the node: +If `kube-vip` has been sat waiting for a long time then you may need to investigate that the annotations have been applied correctly by doing running the `describe` on the node. +As of Equinix Metal's CCM v3.3.0, the annotations format was changed. This means, you should expect either of the following: + +1. 
Equinix Metal's CCM v3.3.0 onwards: + +``` +kubectl describe node k8s.bgp02 +... +Annotations: kubeadm.alpha.kubernetes.io/cri-socket: /var/run/dockershim.sock + node.alpha.kubernetes.io/ttl: 0 + metal.equinix.com/bgp-peers-0-node-asn: 65000 + metal.equinix.com/bgp-peers-0-peer-asn: 65530 + metal.equinix.com/bgp-peers-0-peer-ip: x.x.x.x + metal.equinix.com/bgp-peers-0-src-ip: x.x.x.x +``` + +2. Equinix Metal's CCM before v3.0.0: ``` kubectl describe node k8s.bgp02 diff --git a/docs/usage/k3s/index.md b/docs/usage/k3s/index.md index 78e57ade..280c2178 100644 --- a/docs/usage/k3s/index.md +++ b/docs/usage/k3s/index.md @@ -1,99 +1,53 @@ -# K3s overview (on Equinix Metal) +# K3s Overview -## Prerequisites +`kube-vip` works on [K3s environments](https://k3s.io/) similar to most others with the exception of how it gets deployed. Because K3s is able to bootstrap a single server (control plane node) without the availability of the load balancer fronting it, `kube-vip` can be installed as a DaemonSet. -In order to make ARP work on Equinix Metal, you need to follow [metal-gateway](https://metal.equinix.com/developers/docs/networking/metal-gateway/) guide to have public VLAN subnet, which you can use as your loadbalancer IP. +## Prerequisites (on Equinix Metal) -## Optional Tidy environment (best if something was running before) -``` -rm -rf /var/lib/rancher /etc/rancher ~/.kube/*; \ -ip addr flush dev lo; \ -ip addr add 127.0.0.1/8 dev lo; -``` +In order to make ARP work on Equinix Metal, follow the [metal-gateway](https://metal.equinix.com/developers/docs/networking/metal-gateway/) guide to have public VLAN subnet which can be used for the load balancer IP. -## Step 1: Create Manifests folder +## Clean Environment -This is required, this folder will contain all of the generated manifests that `k3s` will execute as it starts. We will create it before `k3s` and place our `kube-vip` manifests within it. +This step is optional but recommended if a K3s installation previously existed. ``` -mkdir -p /var/lib/rancher/k3s/server/manifests/ +rm -rf /var/lib/rancher /etc/rancher ~/.kube/*; \ +ip addr flush dev lo; \ +ip addr add 127.0.0.1/8 dev lo; ``` -## Step 2: Get rbac for `Kube-Vip` +## Step 1: Create Manifests Folder -As `kube-vip` runs inside of the Kubernetes cluster, we will need to ensure that the required permissions exist. +K3s has an optional manifests directory that will be searched to [auto-deploy](https://rancher.com/docs/k3s/latest/en/advanced/#auto-deploying-manifests) any manifests found within. Create this directory first in order to later place the `kube-vip` resources inside. ``` -curl https://kube-vip.io/manifests/rbac.yaml > /var/lib/rancher/k3s/server/manifests/rbac.yaml +mkdir -p /var/lib/rancher/k3s/server/manifests/ ``` -## Step 3: Generate kube-vip (A VIP address for the network will be required) +## Step 2: Upload Kube-Vip RBAC Manifest -Configure your virtual IP (for the control plane) and interface that will expose this VIP first. +As `kube-vip` runs as a DaemonSet under K3s and not a static Pod, we will need to ensure that the required permissions exist for it to communicate with the API server. RBAC resources are needed to ensure a ServiceAccount exists with those permissions and bound appropriately. -``` -export VIP=x.x.x.x -export INTERFACE=bind0 # or ethX depends on your networking setup -``` - -Modify the `VIP` and `INTERFACE` to match the floating IP address you'd like to use and the interface it should bind to. - -To generate the manifest we have two options! 
We can generate the manifest from [kube-vip.io](kube-vip.io) or use a kube-vip image to generate the manifest! +Get the RBAC manifest and place in the auto-deploy directory: -## Step 3.1: Generate from kube-vip.io - ``` -curl -sL kube-vip.io/k3s | vipAddress=$VIP vipInterface=$INTERFACE sh | sudo tee /var/lib/rancher/k3s/server/manifests/vip.yaml +curl https://kube-vip.io/manifests/rbac.yaml > /var/lib/rancher/k3s/server/manifests/kube-vip-rbac.yaml ``` -## Step 3.2 Generate from container image - - -###Β Get latest version - - We can parse the GitHub API to find the latest version (or we can set this manually) +## Step 3: Generate a Kube-Vip DaemonSet Manifest -`KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")` +Refer to the [DaemonSet manifest generation documentation](/docs/install_daemonset/index.md#generating-a-manifest) for the process to complete this step. -or manually: +Either store this generated manifest separately in the `/var/lib/rancher/k3s/server/manifests/` directory, or append to the existing RBAC manifest called `kube-vip-rbac.yaml`. As a general best practice, it is a cleaner approach to place all related resources into a single YAML file. -`export KVVERSION=vx.x.x` +> Note: Remember to include YAML document delimiters (`---`) when composing multiple documents. -The easiest method to generate a manifest is using the container itself, below will create an alias for different container runtimes. +## Step 4: Install a HA K3s Cluster -### containerd -`alias kube-vip="ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$KVVERSION vip /kube-vip"` +There are multiple ways to install K3s including `[k3sup](https://k3sup.dev/)` or [running the binary](https://rancher.com/docs/k3s/latest/en/quick-start/) locally. Whichever method you choose, the `--tls-san` flag must be passed with the same IP when generating the `kube-vip` DaemonSet manifest when installing the first server (control plane) instance. This is so that K3s generates an API server certificate with the `kube-vip` virtual IP address. -### Docker -`alias kube-vip="docker run --network host --rm ghcr.io/kube-vip/kube-vip:KVVERSION"` - - -``` -kube-vip manifest daemonset \ - --interface $INTERFACE \ - --vip $VIP \ - --controlplane \ - --services \ - --inCluster \ - --taint \ - --arp -``` - -## Step 4: Up Cluster - -From online `-->` - -``` -curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--write-kubeconfig-mode 644 \ --t agent-secret --tls-san $VIP" sh - -``` - -From local `-->` - -``` -sudo ./k3s server --tls-san $VIP -``` +Once the cluster is installed, you should be able to edit the `kubeconfig` file generated from the process and use the `kube-vip` VIP address to access the control plane. -## Step 5: Service Load-Balancing +## Step 5: Service Load Balancing -For this refer to the [on-prem](../on-prem) documentation +If wanting to use the `kube-vip` [cloud controller](/docs/usage/cloud-provider/), pass the `--disable servicelb` flag so K3s will not attempt to render Kubernetes Service resources of type `LoadBalancer`. If building with `k3sup`, the flag should be given as an argument to the `--k3s-extra-args` flag itself: `--k3s-extra-args "--disable servicelb"`. To install the `kube-vip` cloud controller, follow the additional steps in the [cloud controller guide](/docs/usage/cloud-provider/#install-the-kube-vip-cloud-provider). 
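Tying steps 4 and 5 together, a sketch of installing the first K3s server might look like the following, reusing the same `VIP` value that was used when generating the `kube-vip` DaemonSet manifest and assuming the `kube-vip` cloud controller will handle Services of type `LoadBalancer`:

```
# Install the first K3s server, adding the kube-vip VIP as an extra SAN on the API server
# certificate and disabling the built-in service load balancer
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="server --tls-san $VIP --disable servicelb" sh -
```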
diff --git a/docs/usage/kind/index.md b/docs/usage/kind/index.md index f3915dd6..f8edd13e 100644 --- a/docs/usage/kind/index.md +++ b/docs/usage/kind/index.md @@ -2,7 +2,13 @@ ## Deploying KIND -The documentation for KIND is fantastic and it's quickstart guide will have you up and running in no time -> [https://kind.sigs.k8s.io/docs/user/quick-start/](https://kind.sigs.k8s.io/docs/user/quick-start/) +The documentation for KIND is fantastic and its [quick start](https://kind.sigs.k8s.io/docs/user/quick-start/) guide will have you up and running in no time. + +## Create RBAC settings + +``` +kubectl apply -f https://kube-vip.io/manifests/rbac.yaml +``` ## Find Address Pool for Kube-Vip @@ -12,12 +18,12 @@ We will need to find addresses that can be used by Kube-Vip: docker network inspect kind -f '{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}' ``` -This will return a cidr range such as `172.18.0.0/16` and from here we can select a range. +This will return a CIDR range such as `172.18.0.0/16` and from here we can select a range. ## Deploy the Kube-Vip Cloud Controller ``` -$ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml +kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml ``` ## Add our Address range @@ -28,9 +34,17 @@ kubectl create configmap --namespace kube-system kubevip --from-literal range-gl ## Install kube-vip -###Β Get latest version +### Create the RBAC settings + +Since `kube-vip` as a DaemonSet runs as a regular resource instead of a static Pod, it still needs the correct access to be able to watch Kubernetes Services and other objects. In order to do this, RBAC resources must be created (a ServiceAccount, ClusterRole, and ClusterRoleBinding), which can be applied with the following command: + +``` +kubectl apply -f https://kube-vip.io/manifests/rbac.yaml +``` + +### Get latest version - We can parse the GitHub API to find the latest version (or we can set this manually) +We can parse the GitHub API to find the latest version (or we can set this manually) `KVVERSION=$(curl -sL https://api.github.com/repos/kube-vip/kube-vip/releases | jq -r ".[0].name")` @@ -41,15 +55,17 @@ or manually: The easiest method to generate a manifest is using the container itself, below will create an alias for different container runtimes. 
### containerd + `alias kube-vip="ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$KVVERSION vip /kube-vip"` ### Docker -`alias kube-vip="docker run --network host --rm ghcr.io/kube-vip/kube-vip:KVVERSION"` -## Deploy Kube-vip as a deamonset +`alias kube-vip="docker run --network host --rm ghcr.io/kube-vip/kube-vip:$KVVERSION"` + +## Deploy Kube-vip as a DaemonSet ``` -kube-vip manifest daemonset --services --inCluster --arp --interface eth0 | ./kubectl apply -f - +kube-vip manifest daemonset --services --inCluster --arp --interface eth0 | kubectl apply -f - ``` ## Test @@ -67,4 +83,4 @@ kubectl get svc NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 443/TCP 74m nginx LoadBalancer 10.96.196.235 172.18.100.11 80:31236/TCP 6s -``` \ No newline at end of file +``` diff --git a/docs/usage/on-prem/index.md b/docs/usage/on-prem/index.md index a5aebb61..18dbcc34 100644 --- a/docs/usage/on-prem/index.md +++ b/docs/usage/on-prem/index.md @@ -1,123 +1,82 @@ -# Kube-vip on-prem +# Kube-Vip On-Prem -We've designed `kube-vip` to be as de-coupled or agnostic from other components that may exist within a Kubernetes cluster as possible. This has lead to `kube-vip` having a very simplistic but robust approach to advertising Kubernetes services to the outside world and marking these services as ready to use. +We've designed `kube-vip` to be as decoupled or agnostic from other components that may exist within a Kubernetes cluster as possible. This has lead to `kube-vip` having a very simplistic but robust approach to advertising Kubernetes Services to the outside world and marking these Services as ready to use. -## CCM +## Cloud Controller Manager -We can see from the [flow](#Flow) above that `kube-vip` isn't coupled to anything other than the Kubernetes API, and will only act upon an existing Kubernetes primative (in this case the object of type `Service`). This makes it easy for existing CCMs to simply apply their logic to services of type LoadBalancer and leave `kube-vip` to take the next steps to advertise these load-balancers to the outside world. +`kube-vip` isn't coupled to anything other than the Kubernetes API and will only act upon an existing Kubernetes primitive (in this case the object of type `Service`). This makes it easy for existing [cloud controller managers (CCMs)](https://kubernetes.io/docs/concepts/architecture/cloud-controller/) to simply apply their logic to services of type LoadBalancer and leave `kube-vip` to take the next steps to advertise these load balancers to the outside world. +## Using the Kube-Vip Cloud Provider -## Using the Kube-vip Cloud Provider +The `kube-vip` cloud provider can be used to populate an IP address for Services of type `LoadBalancer` similar to what public cloud providers allow through a Kubernetes CCM. The below instructions *should just work* on Kubernetes regardless of the architecture (a Linux OS being the only requirement) and will install the latest components. -The below instructions *should just work* on Kubernetes regardless of architecture (Linux Operating System is the only requirement) - you can quickly install the "latest" components: +## Install the Kube-Vip Cloud Provider -### Create the RBAC settings +The `kube-vip` cloud provider can be installed from the latest release in the `main` branch by using the following command: -As a daemonSet runs within the Kubernetes cluster it needs the correct access to be able to watch Kubernetes services and other objects. 
In order to do this we create a User, Role, and a binding.. we can apply this with the command: - -``` -kubectl apply -f https://kube-vip.io/manifests/rbac.yaml -``` - -### Install the `kube-vip-cloud-provider` - -``` -$ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml -``` - -It uses a `statefulSet` and can always be viewed with the following command: - -``` -kubectl describe pods -n kube-system kube-vip-cloud-provider-0 -``` - -**Create a global CIDR or IP Range** - -Any `service` in any `namespace` can use an address from the global CIDR `cidr-global` or range `range-global` - -``` -kubectl create configmap --namespace kube-system kubevip --from-literal cidr-global=192.168.0.220/29 ``` -or +kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml ``` -kubectl create configmap --namespace kube-system kubevip --from-literal range-global=192.168.1.220-192.168.1.230 -``` - -Creating services of `type: LoadBalancer` in *any namespace* will now take addresses from the **global** cidr defined in the `configmap` unless a specific +## Create a global CIDR or IP Range -## The Detailed guide +In order for `kube-vip` to set an IP address for a Service of type `LoadBalancer`, it needs to have an availability of IP address to assign. This information is stored in a Kubernetes ConfigMap to which `kube-vip` has access. You control the scope of the IP allocations with the `key` within the ConfigMap. Either CIDR blocks or IP ranges may be specified and scoped either globally (cluster-side) or per-Namespace. -### Create the RBAC settings - -As a daemonSet runs within the Kubernetes cluster it needs the correct access to be able to watch Kubernetes services and other objects. In order to do this we create a User, Role, and a binding.. we can apply this with the command: +To allow a global (cluster-wide) CIDR block which `kube-vip` can use to allocate an IP to Services of type `LoadBalancer` in any Namespace, create a ConfigMap named `kubevip` with the key `cidr-global` and value equal to a CIDR block available in your environment. For example, the below command creates a global CIDR with value `192.168.0.220/29` from which `kube-vip` will allocate IP addresses. ``` -kubectl apply -f https://kube-vip.io/manifests/rbac.yaml +kubectl create configmap -n kube-system kubevip --from-literal cidr-global=192.168.0.220/29 ``` -### Install the `kube-vip-cloud-provider` +To use a global range instead, create the key `range-global` with the value set to a valid range of IP addresses. For example, the below command creates a global range using the pool `192.168.1.220-192.168.1.230`. ``` -$ kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml +kubectl create configmap -n kube-system kubevip --from-literal range-global=192.168.1.220-192.168.1.230 ``` -The following output should appear when the manifest is applied: +Creating services of type `LoadBalancer` in any Namespace will now take addresses from one of the global pools defined in the ConfigMap unless a Namespace-specific pool is created. 
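For example, a Namespace-scoped pool can be added alongside (or instead of) the global one by using a Namespace-suffixed key. The sketch below assumes the `kubevip` ConfigMap does not already exist and reserves a range only for Services in the `development` Namespace; if the ConfigMap already exists, add the key to it with `kubectl edit` instead:

```
kubectl create configmap -n kube-system kubevip --from-literal range-development=192.168.0.210-192.168.0.219
```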
-``` -serviceaccount/kube-vip-cloud-controller created -clusterrole.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-role created -clusterrolebinding.rbac.authorization.k8s.io/system:kube-vip-cloud-controller-binding created -statefulset.apps/kube-vip-cloud-provider created -``` +### The Kube-Vip Cloud Provider ConfigMap -We can validate the cloud provider by examining the pods and following the logs: +To manage the IP address ranges for Services of type `LoadBalancer`, the `kube-vip-cloud-provider` uses a ConfigMap held in the `kube-system` Namespace. IP addresses can be configured using one or multiple formats: -``` -kubectl describe pods -n kube-system kube-vip-cloud-provider-0 -kubectl logs -n kube-system kube-vip-cloud-provider-0 -f -``` - -### The Kube-vip Cloud Provider `configmap` - -To manage the IP address ranges for the load balancer instances the `kube-vip-cloud-provider` uses a `configmap` held in the `kube-system` namespace. IP address ranges can be configured using: -- IP address pools by CIDR +- CIDR blocks - IP ranges [start address - end address] -- Multiple pools by CIDR per namespace -- Multiple IP ranges per namespace (handles overlapping ranges) -- Setting of static addresses through --load-balancer-ip=x.x.x.x +- Multiple pools by CIDR per Namespace +- Multiple IP ranges per Namespace (handles overlapping ranges) +- Setting of static addresses through service.metadata.annotations `kube-vip.io/loadbalancerIPs` +- Setting of static addresses through --load-balancer-ip=x.x.x.x (`kubectl expose` command) -To control which IP address range is used for which service the following rules are applied: -- Global address pools (`cidr-global` or `range-global`) are available for use by *any* `service` in *any* `namespace` -- Namespace specific address pools (`cidr-` or `range-`) are *only* available for use by `service` in the *specific* `namespace` -- Static IP addresses can be applied to a load balancer `service` using the `loadbalancerIP` setting, even outside of the assigned ranges +To control which IP address range is used for which Service, the following rules are applied: -Example Configmap: +- Global address pools (`cidr-global` or `range-global`) are available for use by *any* Service in *any* Namespace +- Namespace specific address pools (`cidr-` or `range-`) are *only* available for use by a Service in the *specific* Namespace +- Static IP addresses can be applied to a Service of type `LoadBalancer` using the `spec.loadBalancerIP` field, even outside of the assigned ranges -``` -$ kubectl get configmap -n kube-system kubevip -o yaml +Example Configmap: +```yaml apiVersion: v1 kind: ConfigMap metadata: name: kubevip namespace: kube-system data: - cidr-default: 192.168.0.200/29 # CIDR-based IP range for use in the default namespace - range-development: 192.168.0.210-192.168.0.219 # Range-based IP range for use in the development namespace - cidr-finance: 192.168.0.220/29,192.168.0.230/29 # Multiple CIDR-based ranges for use in the finance namespace - cidr-global: 192.168.0.240/29 # CIDR-based range which can be used in any namespace + cidr-default: 192.168.0.200/29 # CIDR-based IP range for use in the default Namespace + range-development: 192.168.0.210-192.168.0.219 # Range-based IP range for use in the development Namespace + cidr-finance: 192.168.0.220/29,192.168.0.230/29 # Multiple CIDR-based ranges for use in the finance Namespace + cidr-global: 192.168.0.240/29 # CIDR-based range which can be used in any Namespace ``` -### Expose a service +### Expose a 
Service -We can now expose a service and once the cloud provider has provided an address `kube-vip` will start to advertise that address to the outside world as shown below! +We can now expose a Service and once the cloud provider has provided an address, `kube-vip` will start to advertise that address to the outside world as shown below: ``` kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx ``` -or via a `service` YAML definition +or via a Service YAML definition: ``` apiVersion: v1 @@ -134,19 +93,15 @@ spec: type: LoadBalancer ``` +We can also expose a specific address by specifying it in the Service definition: -We can also expose a specific address by specifying it on the command line: - -``` -kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx --load-balancer-ip=1.1.1.1 -``` - -or including it in the `service` definition: ``` apiVersion: v1 kind: Service metadata: + annotations: + "kube-vip.io/loadbalancerIPs": "1.1.1.1" name: nginx spec: ports: @@ -156,19 +111,24 @@ spec: selector: app: nginx type: LoadBalancer - loadBalancerIP: "1.1.1.1" ``` +Or set it via the command line: + +``` +kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx --load-balancer-ip=1.1.1.1 +``` + +Since Kubernetes 1.24, the `loadBalancerIP` field [is deprecated](https://github.com/kubernetes/kubernetes/pull/107235). It is recommended to use the annotation instead of the command line or `service.spec.loadBalancerIP` to specify the IP. + ### Using DHCP for Load Balancers (experimental) -With the latest release of `kube-vip` > 0.2.1, it is possible to use the local network DHCP server to provide `kube-vip` with a load-balancer address that can be used to access a - Kubernetes service on the network. +With `kube-vip` > 0.2.1, it is possible to use the local network DHCP server to provide `kube-vip` with a load balancer address that can be used to access a Kubernetes service on the network. -In order to do this we need to signify to `kube-vip` and the cloud-provider that we don't need one of their managed addresses. We do this by explicitly exposing a service on the -address `0.0.0.0`. When `kube-vip` sees a service on this address it will create a `macvlan` interface on the host and request a DHCP address, once this address is provided it will assign it as the VIP and update the Kubernetes service! +In order to do this, we need to signify to `kube-vip` and the cloud provider that we don't need one of their managed addresses. We do this by explicitly exposing a Service on the address `0.0.0.0`. When `kube-vip` sees a Service on this address, it will create a `macvlan` interface on the host and request a DHCP address. Once this address is provided, it will assign it as the `LoadBalancer` IP and update the Kubernetes Service. ``` -$ k expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx-dhcp --load-balancer-ip=0.0.0.0; k get svc +$ kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx-dhcp --load-balancer-ip=0.0.0.0; kubectl get svc service/nginx-dhcp exposed NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kubernetes ClusterIP 10.96.0.1 443/TCP 17m @@ -176,44 +136,64 @@ nginx-dhcp LoadBalancer 10.97.150.208 0.0.0.0 80:31184/TCP 0s { ... a second or so later ... 
### Using DHCP for Load Balancers (experimental)

-With the latest release of `kube-vip` > 0.2.1, it is possible to use the local network DHCP server to provide `kube-vip` with a load-balancer address that can be used to access a
- Kubernetes service on the network.
+With `kube-vip` > 0.2.1, it is possible to use the local network DHCP server to provide `kube-vip` with a load balancer address that can be used to access a Kubernetes Service on the network.

-In order to do this we need to signify to `kube-vip` and the cloud-provider that we don't need one of their managed addresses. We do this by explicitly exposing a service on the
-address `0.0.0.0`. When `kube-vip` sees a service on this address it will create a `macvlan` interface on the host and request a DHCP address, once this address is provided it will assign it as the VIP and update the Kubernetes service!
+In order to do this, we need to signify to `kube-vip` and the cloud provider that we don't need one of their managed addresses. We do this by explicitly exposing a Service on the address `0.0.0.0`. When `kube-vip` sees a Service on this address, it will create a `macvlan` interface on the host and request a DHCP address. Once this address is provided, it will assign it as the `LoadBalancer` IP and update the Kubernetes Service.

```
-$ k expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx-dhcp --load-balancer-ip=0.0.0.0; k get svc
+$ kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx-dhcp --load-balancer-ip=0.0.0.0; kubectl get svc
service/nginx-dhcp exposed
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1       <none>          443/TCP        17m
@@ -176,44 +136,64 @@ nginx-dhcp   LoadBalancer   10.97.150.208   0.0.0.0         80:31184/TCP   0s
{ ... a second or so later ... }

-$ k get svc
+$ kubectl get svc
NAME         TYPE           CLUSTER-IP      EXTERNAL-IP     PORT(S)        AGE
kubernetes   ClusterIP      10.96.0.1       <none>          443/TCP        17m
nginx-dhcp   LoadBalancer   10.97.150.208   192.168.0.155   80:31184/TCP   3s
```

-### Using UPNP to expose a service to the outside world
+You can also specify a hostname used for the DHCP lease by adding an annotation to your Service:
+
+```
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx-dhcp
+  annotations:
+    kube-vip.io/loadbalancerHostname: mydhcp-test
+spec:
+  loadBalancerIP: 0.0.0.0
+  ports:
+  - name: http
+    port: 80
+    protocol: TCP
+    targetPort: 80
+  selector:
+    app: hello-world
+  type: LoadBalancer
+```
+
+### Using UPnP to expose a Service to the outside world

-With the latest release of `kube-vip` > 0.2.1, it is possible to expose a load-balancer on a specific port and using UPNP (on a supported gateway) expose this service to the inte
-rnet.
+With `kube-vip` > 0.2.1, it is possible to expose a Service of type `LoadBalancer` on a specific port to the Internet by using UPnP (on a supported gateway).

Most simple networks look something like the following:

`<----- <internal range> -----> [router] <----- <external range> ----> Internet`

Using UPnP we can create a matching port on the `<external range>` allowing your Service to be exposed to the Internet.

-#### Enable UPNP
+#### Enable UPnP

-Add the following to the `kube-vip` `env:` section, and the rest should be completely automated.
+Add the following to the `kube-vip` `env:` section of either the static Pod or DaemonSet for `kube-vip`, and the rest should be completely automated.

-**Note** some environments may require (Unifi) will require `Secure mode` being `disabled` (this allows a host with a different address to register a port)
+**Note** some environments (Unifi, for example) may require `Secure mode` to be `disabled` (this allows a host with a different address to register a port).

``` - name: enableUPNP value: "true" ``` -#### Exposing a service +#### Exposing a Service -To expose a port successfully we'll need to change the command slightly: +To expose a port successfully, we'll need to change the command slightly: `--target-port=80` the port of the application in the pods (HTT/NGINX) -`--port=32380` the port the service will be exposed on (and what you should connect to in order to receive traffic from the service) +`--port=32380` the port the Service will be exposed on (and what you should connect to in order to receive traffic from the Service) `kubectl expose deployment plunder-nginx --port=32380 --target-port=80 --type=LoadBalancer --namespace plunder` -The above example should expose a port on your external (internet facing address), that can be tested externally with: +The above example should expose a port on your external (Internet facing) address that can be tested externally with: ``` $ curl externalIP:32380 diff --git a/go.mod b/go.mod index 3223beb7..bc8c6b0f 100644 --- a/go.mod +++ b/go.mod @@ -3,106 +3,120 @@ module github.com/kube-vip/kube-vip go 1.21 require ( - github.com/cloudflare/ipvs v0.8.0 + github.com/cloudflare/ipvs v0.9.1 github.com/davecgh/go-spew v1.1.1 - github.com/ghodss/yaml v1.0.0 + github.com/florianl/go-conntrack v0.4.0 github.com/golang/protobuf v1.5.3 - github.com/insomniacslk/dhcp v0.0.0-20230307103557-e252950ab961 + github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c github.com/jpillora/backoff v1.0.0 github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08 - github.com/mdlayher/ndp v0.0.0-20200602162440-17ab9e3e5567 - github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.23.0 - github.com/osrg/gobgp v0.0.0-20210901090119-8ab84f8636ee - github.com/packethost/packngo v0.29.0 + github.com/mdlayher/ndp v1.0.1 + github.com/onsi/ginkgo/v2 v2.13.0 + github.com/onsi/gomega v1.30.0 + github.com/osrg/gobgp/v3 v3.19.0 + github.com/packethost/packngo v0.30.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.14.0 + github.com/prometheus/client_golang v1.16.0 github.com/sirupsen/logrus v1.9.3 - github.com/spf13/cobra v1.6.1 - github.com/stretchr/testify v1.8.2 + github.com/spf13/cobra v1.7.0 + github.com/stretchr/testify v1.8.4 github.com/vishvananda/netlink v1.2.1-beta.2 - golang.org/x/net v0.17.0 - golang.org/x/sys v0.13.0 - k8s.io/api v0.26.2 - k8s.io/apimachinery v0.26.2 - k8s.io/client-go v0.26.2 - k8s.io/klog/v2 v2.90.1 - sigs.k8s.io/kind v0.17.0 + go.etcd.io/etcd/api/v3 v3.5.10 + go.etcd.io/etcd/client/pkg/v3 v3.5.10 + go.etcd.io/etcd/client/v3 v3.5.10 + go.uber.org/zap v1.26.0 + golang.org/x/exp v0.0.0-20231005195138-3e424a577f31 + golang.org/x/sys v0.15.0 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 + k8s.io/api v0.28.3 + k8s.io/apimachinery v0.28.3 + k8s.io/client-go v0.28.3 + k8s.io/klog/v2 v2.100.1 + sigs.k8s.io/kind v0.20.0 + sigs.k8s.io/yaml v1.4.0 ) require ( - github.com/BurntSushi/toml v1.0.0 // indirect + github.com/BurntSushi/toml v1.3.2 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/eapache/channels v1.1.0 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.10.0 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect - 
github.com/fsnotify/fsnotify v1.4.9 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.2.4 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.12 // indirect - github.com/inconshreveable/mousetrap v1.0.1 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/josharian/native v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/k-sone/critbitgo v1.4.0 // indirect - github.com/magiconair/properties v1.8.5 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7 // indirect - github.com/mdlayher/genetlink v1.0.0 // indirect - github.com/mdlayher/netlink v1.4.1 // indirect - github.com/mdlayher/raw v0.0.0-20210412142147-51b895745faf // indirect - github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mdlayher/genetlink v1.3.2 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/packet v1.1.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/nxadm/tail v1.4.8 // indirect - github.com/pelletier/go-toml v1.9.4 // indirect - github.com/pierrec/lz4/v4 v4.1.14 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect - github.com/spf13/afero v1.6.0 // indirect - github.com/spf13/cast v1.3.1 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 
// indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/spf13/viper v1.7.1 // indirect - github.com/subosito/gotenv v1.2.0 // indirect - github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923 // indirect - github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f // indirect - gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f // indirect + github.com/spf13/viper v1.16.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.13.0 // indirect + golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b // indirect google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/grpc v1.58.3 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.62.0 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect + k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 74fd296b..e554e152 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -13,6 +14,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod 
h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -21,7 +25,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -31,63 +34,46 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/ipvs v0.8.0 h1:2cl7WMwQI6pklBcwrzdusAqHuPThu/VBdAFNl5r/D5w= -github.com/cloudflare/ipvs v0.8.0/go.mod h1:rL2uv7wRPwNsyRig+6EJybJVLVIuw7+L6dc8DPyn+84= +github.com/cloudflare/ipvs v0.9.1 h1:4azDqbWqNAkcuD78yK9y9rxZoQfW2OnC9DFo+y7HPgk= +github.com/cloudflare/ipvs v0.9.1/go.mod h1:5H4icNJZ8T4H7bg0/THRew5BbNOkDgl2RC1twjeSOHU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= 
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k= github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= @@ -97,50 +83,40 @@ github.com/emicklei/go-restful/v3 v3.10.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/florianl/go-conntrack v0.4.0 h1:TlYkxytdwgVayfU0cKwkHurQA0Rd1ZSEBRckRYDUu18= +github.com/florianl/go-conntrack v0.4.0/go.mod h1:iPDx4oIats2T7X7Jm3PFyRCJM1GfZhJaSHOWROYOrE8= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod 
h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.7 h1:/VSMRlnY/JSyqxQUzQLKVMAskpY/NZKFA5j2P+0pP2M= -github.com/go-test/deep v1.0.7/go.mod h1:QV8Hv/iy04NyLBxAdO9njL0iVPN1S4d/A3NVv1V36o8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= +github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod 
h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -168,13 +144,12 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -186,13 +161,15 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -200,54 +177,36 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= 
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714 h1:/jC7qQFrv8CrSJVmaolDVOxTfS9kc36uB6H40kdbQq8= github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/insomniacslk/dhcp v0.0.0-20230307103557-e252950ab961 h1:x/YtdDlmypenG1te/FfH6LVM+3krhXk5CFV8VYNNX5M= -github.com/insomniacslk/dhcp v0.0.0-20230307103557-e252950ab961/go.mod h1:IKrnDWs3/Mqq5n0lI+RxA2sB7MvN/vbMBP3ehXg65UI= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c h1:P/3mFnHCv1A/ej4m8pF5EB6FUt9qEL2Q9lfrcUNwCYs= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= @@ -263,59 +222,42 @@ github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR7 github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= -github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190 h1:iycCSDo8EKVueI9sfVBBJmtNn9DnXV/K1YWwEJO+uOs= github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k-sone/critbitgo v1.4.0 h1:l71cTyBGeh6X5ATh6Fibgw3+rtNT80BA0uNNWgkPrbE= github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s= github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08 h1:UQlM3K8NSN3cqIsICAQnSVOQe9B4LyFEu/xJUr+Scn4= github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08/go.mod h1:0L/S1RSG4wA4M2Vhau3z7VsYMLxFnsX0bzzgwYRIdYU= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= -github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7 h1:lez6TS6aAau+8wXUP3G9I3TGlmPFEq2CTxBaRqY6AGE= -github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y= -github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43 h1:WgyLFv10Ov49JAQI/ZLUkCZ7VJS3r74hwFIGXJsgZlY= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= -github.com/mdlayher/genetlink v1.0.0 h1:OoHN1OdyEIkScEmRgxLEe2M9U8ClMytqA5niynLtfj0= +github.com/mdlayher/ethtool 
v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE= github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= -github.com/mdlayher/ndp v0.0.0-20200602162440-17ab9e3e5567 h1:x+xs91ZJ+lr0C6sedWeREvck4uGCt+AA1kKXwsHB6jI= -github.com/mdlayher/ndp v0.0.0-20200602162440-17ab9e3e5567/go.mod h1:32w/5dDZWVSEOxyniAgKK4d7dHTuO6TCxWmUznQe3f8= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= +github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= @@ -325,142 +267,81 @@ github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= github.com/mdlayher/netlink v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= -github.com/mdlayher/netlink v1.4.1 h1:I154BCU+mKlIf7BgcAJB2r7QjveNPty6uNY1g9ChVfI= github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= -github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mdlayher/raw v0.0.0-20210412142147-51b895745faf h1:InctQoB89TIkmgIFQeIL4KXNvWc1iebQXdZggqPSwL8= -github.com/mdlayher/raw v0.0.0-20210412142147-51b895745faf/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00 h1:qEtkL8n1DAHpi5/AOgAckwGQUlMe4+jhL/GMt+GKIks= +github.com/mdlayher/netlink v1.5.0/go.mod h1:1Kr8BBFxGyUyNmztC9WLOayqYVAd2wsgOZm18nqGuzQ= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.4.1 
h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= -github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= +github.com/mdlayher/socket v0.1.0/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega 
v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/osrg/gobgp v0.0.0-20210901090119-8ab84f8636ee h1:p9cPdDaBWf5r9+arw2pUuc0aDT/tkfCEBVqfx/UBb2o= -github.com/osrg/gobgp v0.0.0-20210901090119-8ab84f8636ee/go.mod h1:QvEj9qq9o66TvTyFC0Yyn1zgSSFrno8MsptfrjyMR7A= -github.com/packethost/packngo v0.29.0 h1:gRIhciVZQ/zLNrIdIdbOUyB/Tw5IgoaXyhP4bvE+D2s= -github.com/packethost/packngo v0.29.0/go.mod h1:/UHguFdPs6Lf6FOkkSEPnRY5tgS0fsVM+Zv/bvBrmt0= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/osrg/gobgp/v3 v3.19.0 h1:SHjeu707EVp5h2LR8qLxDz/PzFU6oO+jhquGzGsigTI= +github.com/osrg/gobgp/v3 v3.19.0/go.mod h1:TszzyYD/31jXlljifRhxFEmPsITEloZmGU5CTN21W18= +github.com/packethost/packngo v0.30.0 h1:JVeTwbXXETsLTDQncUbYwIFpkOp/xevXrffM2HrFECI= +github.com/packethost/packngo v0.30.0/go.mod h1:BT/XcdwLVmeMtGPbovnxCpnI1s9ylSE1cs/7pq007NE= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= -github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod 
h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/russross/blackfriday/v2 v2.0.1/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= -github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -471,46 +352,53 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923 h1:tHNk7XK9GkmKUR6Gh8gVBKXc2MVSZ4G/NnWLtzw4gNA= -github.com/u-root/uio v0.0.0-20230220225925-ffce2a382923/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= -github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= -github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod 
h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f h1:Wku8eEdeJqIOFHtrfkYUByc4bCaTeA6fL0UJgfEiFMI= -gitlab.com/golang-commonmark/puny v0.0.0-20191124015043-9f83538fa04f/go.mod h1:Tiuhl+njh/JIg0uS/sOJVYi0x2HEa5rc1OAaVsb5tAs= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -521,6 +409,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231005195138-3e424a577f31 h1:9k5exFQKQglLo+RoP+4zMjOFE14P6+vyR0baDAi0Rcs= +golang.org/x/exp v0.0.0-20231005195138-3e424a577f31/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -541,22 +431,21 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -572,23 +461,26 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -596,8 +488,10 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -610,35 +504,24 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -652,60 +535,63 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200602100848-8d3cce7afc34/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -715,7 +601,6 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -742,12 +627,25 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvYQH2OU3/TnxLx97WDSUDRABfT18pCOYwc2GE= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -764,6 +662,9 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -801,7 +702,17 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/api 
v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -816,6 +727,10 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -832,36 +747,26 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 
h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -869,26 +774,29 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.2 h1:dM3cinp3PGB6asOySalOZxEG4CZ0IAdJsrYZXE/ovGQ= -k8s.io/api v0.26.2/go.mod h1:1kjMQsFE+QHPfskEcVNgL3+Hp88B80uj0QtSOlj8itU= -k8s.io/apimachinery v0.26.2 h1:da1u3D5wfR5u2RpLhE/ZtZS2P7QvDgLZTi9wrNZl/tQ= -k8s.io/apimachinery v0.26.2/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= -k8s.io/client-go v0.26.2 h1:s1WkVujHX3kTp4Zn4yGNFK+dlDXy1bAAkIl+cFAiuYI= -k8s.io/client-go v0.26.2/go.mod h1:u5EjOuSyBa09yqqyY7m3abZeovO/7D/WehVVlZ2qcqU= -k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= -k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= +k8s.io/api v0.28.3/go.mod 
h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= +k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= +k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= +k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= +k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= +k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kind v0.17.0 h1:CScmGz/wX66puA06Gj8OZb76Wmk7JIjgWf5JDvY7msM= -sigs.k8s.io/kind v0.17.0/go.mod h1:Qqp8AiwOlMZmJWs37Hgs31xcbiYXjtXlRBSftcnZXQk= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8= +sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/kube-vip.png b/kube-vip.png new file mode 100644 index 00000000..f2f7bf96 Binary files /dev/null and b/kube-vip.png differ diff --git a/kubernetes-control-plane.md b/kubernetes-control-plane.md deleted file mode 100644 index 835862a8..00000000 --- a/kubernetes-control-plane.md +++ /dev/null @@ -1,147 +0,0 @@ -# Load Balancing a Kubernetes Cluster (Control-Plane) - -This document covers all of the details for using `kube-vip` to build a HA Kubernetes cluster - -`tl;dr version` -- Generate/modify first node `kube-vip` config/manifest -- `init` first node -- `join` remaining nodes -- Add remaining config/manifests - -## Infrastructure architecture - -The infrastructure for our example HA Kubernetes cluster is as follows: - -| Node | Address | -|----------------|------------| -| VIP | 10.0.0.75 | -| controlPlane01 | 10.0.0.70 | -| controlPlane02 | 10.0.0.71 | -| controlPlane03 | 10.0.0.72 | - -All nodes are running Ubuntu 18.04, Docker CE and will use Kubernetes 1.17.0. 
- -### Generate the `kube-vip` configuration - -Make sure that the config directory exists: `sudo mkdir -p /etc/kube-vip/`, this directory can be any directory, however the `hostPath` in the manifest will need modifying to point to the correct path. - -``` -sudo docker run -it --rm ghcr.io/kube-vip/kube-vip:0.3.7 sample config | sudo tee /etc/kube-vip/config.yaml -``` - -### Modify the configuration - -**Cluster Configuration** -Modify the `remotePeers` to point to the correct addresses of the other two nodes, ensure that their `id` is unique, otherwise this will confuse the raft algorithm. The `localPeer` should be the configuration of the current node (`controlPlane01`), which is where this instance of the cluster will run. - -As this node will be the first node, it will need to elect itself leader as until this occurs the VIP won't be activated! - -`startAsLeader: true` - -**VIP Config** -We will need to set our VIP address to `192.168.0.75` and to ensure all hosts are updated when the VIP moves we will enable ARP broadcasts `gratuitousARP: true` - -**Load Balancer** -We will configure the load balancer to sit on the standard API-Server port `6443` and we will configure the backends to point to the API-servers that will be configured to run on port `6444`. Also for the Kubernetes Control Plane we will configure the load balancer to be of `type: tcp`. - -We can also use `6443` for both the VIP and the API-Servers, in order to do this we need to specify that the api-server is bound to its local IP. To do this we use the `--apiserver-advertise-address` flag as part of the `init`, this means that we can then bind the same port to the VIP and we won't have a port conflict. - -**config.yaml** - -`user@controlPlane01:/etc/kube-vip$ cat config.yaml` - -... - -``` -remotePeers: -- id: server2 - address: 192.168.0.71 - port: 10000 -- id: server3 - address: 192.168.0.72 - port: 10000 -localPeer: - id: server1 - address: 192.168.0.70 - port: 10000 -vip: 192.168.0.75 -gratuitousARP: true -singleNode: false -startAsLeader: true -interface: ens192 -loadBalancers: -- name: Kubernetes Control Plane - type: tcp - port: 6443 - bindToVip: true - backends: - - port: 6444 - address: 192.168.0.70 - - port: 6444 - address: 192.168.0.71 - - port: 6444 - address: 192.168.0.72 -``` - -### First Node - -To generate the basic Kubernetes static pod `yaml` configuration: - -Make sure that the manifest directory exists: `sudo mkdir -p /etc/kubernetes/manifests/` - -``` -sudo docker run -it --rm ghcr.io/kube-vip/kube-vip:0.3.7 sample manifest | sudo tee /etc/kubernetes/manifests/kube-vip.yaml -``` - -Ensure that `image: ghcr.io/kube-vip/kube-vip:` is modified to point to a specific version (`0.3.7` at the time of writing), refer to [GitHub](https://github.com/kube-vip/kube-vip/pkgs/container/kube-vip) for details. Also ensure that the `hostPath` points to the correct `kube-vip` configuration, if it isn't the above path. - -The **vip** is set to `192.168.0.75` and this first node will elect itself as leader, and as part of the `kubeadm init` it will use the VIP in order to speak back to the initialising api-server.
- -`sudo kubeadm init --control-plane-endpoint "192.168.0.75:6443" --apiserver-bind-port 6444 --upload-certs --kubernetes-version "v1.17.0"` - -Once this node is up and running we will be able to see the control-plane pods, including the `kube-vip` pod: - -``` -$ kubectl get pods -A -NAMESPACE NAME READY STATUS RESTARTS AGE -<...> -kube-system kube-vip-controlplane01 1/1 Running 0 10m -``` - -### Remaining Nodes - -We first will need to create the `kube-vip` configuration that resides in `/etc/kube-vip/config.yaml` or we can regenerate it from scratch using the above example. Ensure that the configuration is almost identical, with the `localPeer` and `remotePeers` sections updated for each node. Finally, ensure that the remaining nodes will behave as standard cluster nodes by setting `startAsLeader: false`. - -At this point **DON'T** generate the manifests, this is due to some bizarre `kubeadm/kubelet` behaviour. - -``` - kubeadm join 192.168.0.75:6443 --token \ - --discovery-token-ca-cert-hash sha256: \ - --control-plane --certificate-key - -``` - -**After** this node has been added to the cluster, we can add the manifest to also add this node as a `kube-vip` member. (Adding the manifest afterwards doesn't interfere with `kubeadm`). - -``` -sudo docker run -it --rm ghcr.io/kube-vip/kube-vip:0.3.7 sample manifest | sudo tee /etc/kubernetes/manifests/kube-vip.yaml -``` - -Once this node is added we will be able to see that the `kube-vip` pod is up and running as expected: - -``` -user@controlPlane01:~$ kubectl get pods -A | grep vip -kube-system kube-vip-controlplane01 1/1 Running 1 16m -kube-system kube-vip-controlplane02 1/1 Running 0 18m -kube-system kube-vip-controlplane03 1/1 Running 0 20m - -``` - -If we look at the logs, we can see that the VIP is running on the second node and we're waiting for our third node to join the cluster: - -``` -$ kubectl logs kube-vip-controlplane02 -n kube-system -time="2020-02-12T15:33:09Z" level=info msg="The Node [192.168.0.70:10000] is leading" -time="2020-02-12T15:33:09Z" level=info msg="The Node [192.168.0.70:10000] is leading" - -``` diff --git a/main.go b/main.go index 9e20cb03..2d66f9cd 100644 --- a/main.go +++ b/main.go @@ -11,6 +11,7 @@ var Version string var Build string func main() { + cmd.Release.Version = Version cmd.Release.Build = Build cmd.Execute() diff --git a/pkg/bgp/hosts.go b/pkg/bgp/hosts.go index 8952d168..2d283e99 100644 --- a/pkg/bgp/hosts.go +++ b/pkg/bgp/hosts.go @@ -5,7 +5,7 @@ import ( "fmt" "net" - api "github.com/osrg/gobgp/api" + api "github.com/osrg/gobgp/v3/api" ) // AddHost will update peers of a host diff --git a/pkg/bgp/peers.go b/pkg/bgp/peers.go index c04b404c..640c88bc 100644 --- a/pkg/bgp/peers.go +++ b/pkg/bgp/peers.go @@ -9,25 +9,15 @@ import ( "github.com/golang/protobuf/ptypes" //nolint "github.com/golang/protobuf/ptypes/any" - api "github.com/osrg/gobgp/api" + api "github.com/osrg/gobgp/v3/api" ) -// AddPeer will add peers to the BGP configuration +// AddPeer will add peers to the BGP configuration func (b *Server) AddPeer(peer Peer) (err error) { - port := 179 - - if t := strings.SplitN(peer.Address, ":", 2); len(t) == 2 { - peer.Address = t[0] - - if port, err = strconv.Atoi(t[1]); err != nil { - return fmt.Errorf("unable to parse port '%s' as int: %s", t[1], err) - } - } - p := &api.Peer{ Conf: &api.PeerConf{ NeighborAddress: peer.Address, - PeerAs: peer.AS, + PeerAsn: peer.AS, AuthPassword: peer.Password, }, @@ -46,7 +36,7 @@ func (b *Server) AddPeer(peer Peer) (err error) { Transport:
&api.Transport{ MtuDiscovery: true, RemoteAddress: peer.Address, - RemotePort: uint32(port), + RemotePort: uint32(179), }, } @@ -63,49 +53,60 @@ func (b *Server) AddPeer(peer Peer) (err error) { }) } -func (b *Server) getPath(ip net.IP) *api.Path { - var pfxLen uint32 = 32 - if ip.To4() == nil { - if !b.c.IPv6 { - return nil - } - - pfxLen = 128 - } - - //nolint - nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ - Prefix: ip.String(), - PrefixLen: pfxLen, - }) +func (b *Server) getPath(ip net.IP) (path *api.Path) { + isV6 := ip.To4() == nil //nolint - a1, _ := ptypes.MarshalAny(&api.OriginAttribute{ + originAttr, _ := ptypes.MarshalAny(&api.OriginAttribute{ Origin: 0, }) - var nh string - if b.c.NextHop != "" { - nh = b.c.NextHop - } else if b.c.SourceIP != "" { - nh = b.c.SourceIP + if !isV6 { + //nolint + nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ + Prefix: ip.String(), + PrefixLen: 32, + }) + + //nolint + nhAttr, _ := ptypes.MarshalAny(&api.NextHopAttribute{ + NextHop: "0.0.0.0", // gobgp will fill this + }) + + path = &api.Path{ + Family: &api.Family{ + Afi: api.Family_AFI_IP, + Safi: api.Family_SAFI_UNICAST, + }, + Nlri: nlri, + Pattrs: []*any.Any{originAttr, nhAttr}, + } } else { - nh = b.c.RouterID - } - - //nolint - a2, _ := ptypes.MarshalAny(&api.NextHopAttribute{ - NextHop: nh, - }) - - return &api.Path{ - Family: &api.Family{ - Afi: api.Family_AFI_IP, + //nolint + nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ + Prefix: ip.String(), + PrefixLen: 128, + }) + + v6Family := &api.Family{ + Afi: api.Family_AFI_IP6, Safi: api.Family_SAFI_UNICAST, - }, - Nlri: nlri, - Pattrs: []*any.Any{a1, a2}, + } + + //nolint + mpAttr, _ := ptypes.MarshalAny(&api.MpReachNLRIAttribute{ + Family: v6Family, + NextHops: []string{"::"}, // gobgp will fill this + Nlris: []*any.Any{nlri}, + }) + + path = &api.Path{ + Family: v6Family, + Nlri: nlri, + Pattrs: []*any.Any{originAttr, mpAttr}, + } } + return } // ParseBGPPeerConfig - take a string and parses it into an array of peers @@ -116,24 +117,53 @@ func ParseBGPPeerConfig(config string) (bgpPeers []Peer, err error) { } for x := range peers { - peer := strings.Split(peers[x], ":") - if len(peer) != 4 { - return nil, fmt.Errorf("BGP Peer configuration format error :::") + peerStr := peers[x] + if peerStr == "" { + continue + } + isV6Peer := peerStr[0] == '[' + + address := "" + if isV6Peer { + addressEndPos := strings.IndexByte(peerStr, ']') + if addressEndPos == -1 { + return nil, fmt.Errorf("no matching ] found for IPv6 BGP Peer") + } + address = peerStr[1:addressEndPos] + peerStr = peerStr[addressEndPos+1:] } - ASNumber, err := strconv.Atoi(peer[1]) + + peer := strings.Split(peerStr, ":") + if len(peer) < 2 { + return nil, fmt.Errorf("mandatory peering params : incomplete") + } + + if !isV6Peer { + address = peer[0] + } + + ASNumber, err := strconv.ParseUint(peer[1], 10, 32) if err != nil { return nil, fmt.Errorf("BGP Peer AS format error [%s]", peer[1]) + } + password := "" + if len(peer) >= 3 { + password = peer[2] } - multiHop, err := strconv.ParseBool(peer[3]) - if err != nil { - return nil, fmt.Errorf("BGP MultiHop format error (true/false) [%s]", peer[1]) + + multiHop := false + if len(peer) >= 4 { + multiHop, err = strconv.ParseBool(peer[3]) + if err != nil { + return nil, fmt.Errorf("BGP MultiHop format error (true/false) [%s]", peer[1]) + } } peerConfig := Peer{ - Address: peer[0], + Address: address, AS: uint32(ASNumber), - Password: peer[2], + Password: password, MultiHop: multiHop, } diff --git a/pkg/bgp/server.go 
b/pkg/bgp/server.go index 33f42e9f..faafaf0b 100644 --- a/pkg/bgp/server.go +++ b/pkg/bgp/server.go @@ -6,12 +6,12 @@ import ( "log" "time" - api "github.com/osrg/gobgp/api" - gobgp "github.com/osrg/gobgp/pkg/server" + api "github.com/osrg/gobgp/v3/api" + gobgp "github.com/osrg/gobgp/v3/pkg/server" ) // NewBGPServer takes a configuration and returns a running BGP server instance -func NewBGPServer(c *Config) (b *Server, err error) { +func NewBGPServer(c *Config, peerStateChangeCallback func(*api.WatchEventResponse_PeerEvent)) (b *Server, err error) { if c.AS == 0 { return nil, fmt.Errorf("You need to provide AS") } @@ -32,7 +32,7 @@ func NewBGPServer(c *Config) (b *Server, err error) { if err = b.s.StartBgp(context.Background(), &api.StartBgpRequest{ Global: &api.Global{ - As: c.AS, + Asn: c.AS, RouterId: c.RouterID, ListenPort: -1, }, @@ -40,7 +40,14 @@ func NewBGPServer(c *Config) (b *Server, err error) { return } - if err = b.s.MonitorPeer(context.Background(), &api.MonitorPeerRequest{}, func(p *api.Peer) { log.Println(p) }); err != nil { + if err = b.s.WatchEvent(context.Background(), &api.WatchEventRequest{Peer: &api.WatchEventRequest_Peer{}}, func(r *api.WatchEventResponse) { + if p := r.GetPeer(); p != nil && p.Type == api.WatchEventResponse_PeerEvent_STATE { + log.Println(p) + if peerStateChangeCallback != nil { + peerStateChangeCallback(p) + } + } + }); err != nil { return } diff --git a/pkg/bgp/types.go b/pkg/bgp/types.go index 2e06f888..e97bc8c8 100644 --- a/pkg/bgp/types.go +++ b/pkg/bgp/types.go @@ -1,6 +1,6 @@ package bgp -import gobgp "github.com/osrg/gobgp/pkg/server" +import gobgp "github.com/osrg/gobgp/v3/pkg/server" // Peer defines a BGP Peer type Peer struct { @@ -14,12 +14,10 @@ type Peer struct { type Config struct { AS uint32 RouterID string - NextHop string SourceIP string SourceIF string Peers []Peer - IPv6 bool } // Server manages a server object diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index e42b89a0..1a45e4c4 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -1,22 +1,24 @@ package cluster import ( + "sync" + + log "github.com/sirupsen/logrus" + "github.com/kube-vip/kube-vip/pkg/kubevip" "github.com/kube-vip/kube-vip/pkg/vip" - log "github.com/sirupsen/logrus" ) // Cluster - The Cluster object manages the state of the cluster for a particular node type Cluster struct { stop chan bool completed chan bool + once sync.Once Network vip.Network } // InitCluster - Will attempt to initialise all of the required settings for the cluster func InitCluster(c *kubevip.Config, disableVIP bool) (*Cluster, error) { - - // TODO - Check for root (needed to netlink) var network vip.Network var err error @@ -32,6 +34,8 @@ func InitCluster(c *kubevip.Config, disableVIP bool) (*Cluster, error) { Network: network, } + log.Debugf("init enable service security: %t", c.EnableServiceSecurity) + return newCluster, nil } @@ -42,20 +46,23 @@ func startNetworking(c *kubevip.Config) (vip.Network, error) { address = c.Address } - network, err := vip.NewConfig(address, c.Interface, c.DDNS) + network, err := vip.NewConfig(address, c.Interface, c.VIPSubnet, c.DDNS, c.RoutingTableID, c.RoutingTableType) if err != nil { return nil, err } + return network, nil } // Stop - Will stop the Cluster and release VIP if needed func (cluster *Cluster) Stop() { - // Close the stop chanel, which will shut down the VIP (if needed) - close(cluster.stop) + // Close the stop channel, which will shut down the VIP (if needed) + if cluster.stop != nil { + cluster.once.Do(func() { 
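Relating to the `NewBGPServer` change above: the constructor now takes an optional peer-state callback driven by gobgp's `WatchEvent` stream, and passing `nil` keeps the previous log-only behaviour. A hedged sketch of a caller, where the AS number and router ID are placeholders:

```
package main

import (
	"log"

	"github.com/kube-vip/kube-vip/pkg/bgp"
	api "github.com/osrg/gobgp/v3/api"
)

func main() {
	// Placeholder configuration; kube-vip itself populates this from its own config.
	cfg := &bgp.Config{AS: 65000, RouterID: "192.168.0.10"}

	// The second argument is the new peer-state callback (may be nil).
	server, err := bgp.NewBGPServer(cfg, func(p *api.WatchEventResponse_PeerEvent) {
		log.Printf("BGP peer state change: %v", p)
	})
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = server.Close() }()
}
```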
// Ensure that the close channel can only ever be called once + close(cluster.stop) + }) + } // Wait until the completed channel is closed, signallign all shutdown tasks completed <-cluster.completed - - log.Info("Stopped") } diff --git a/pkg/cluster/clusterDDNS.go b/pkg/cluster/clusterDDNS.go index 9f904ef2..ebacad42 100644 --- a/pkg/cluster/clusterDDNS.go +++ b/pkg/cluster/clusterDDNS.go @@ -19,9 +19,5 @@ func (cluster *Cluster) StartDDNS(ctx context.Context) error { return err } - if err = cluster.Network.SetIP(ip); err != nil { - return err - } - - return nil + return cluster.Network.SetIP(ip) } diff --git a/pkg/cluster/clusterLeaderElection.go b/pkg/cluster/clusterLeaderElection.go index c1038bef..9d8501f3 100644 --- a/pkg/cluster/clusterLeaderElection.go +++ b/pkg/cluster/clusterLeaderElection.go @@ -11,18 +11,19 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + "github.com/kube-vip/kube-vip/pkg/etcd" "github.com/kube-vip/kube-vip/pkg/k8s" "github.com/kube-vip/kube-vip/pkg/kubevip" "github.com/kube-vip/kube-vip/pkg/loadbalancer" - "github.com/kube-vip/kube-vip/pkg/packet" "github.com/packethost/packngo" log "github.com/sirupsen/logrus" + clientv3 "go.etcd.io/etcd/client/v3" v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" @@ -31,21 +32,26 @@ import ( watchtools "k8s.io/client-go/tools/watch" ) -const plunderLock = "plndr-cp-lock" - // Manager degines the manager of the load-balancing services type Manager struct { KubernetesClient *kubernetes.Clientset // This channel is used to signal a shutdown SignalChan chan os.Signal + + EtcdClient *clientv3.Client } // NewManager will create a new managing object func NewManager(path string, inCluster bool, port int) (*Manager, error) { var hostname string - // If the path passed is empty and not running in the cluster, + // If inCluster is set then it will likely have started as a static pod or won't have the + // VIP up before trying to connect to the API server, we set the API endpoint to this machine to + // ensure connectivity. Else if the path passed is empty and not running in the cluster, // attempt to look for a kubeconfig in the default HOME dir. + + hostname = fmt.Sprintf("kubernetes:%v", port) + if len(path) == 0 && !inCluster { path = filepath.Join(os.Getenv("HOME"), ".kube", "config") @@ -70,26 +76,12 @@ func NewManager(path string, inCluster bool, port int) (*Manager, error) { // StartCluster - Begins a running instance of the Leader Election cluster func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer *bgp.Server) error { - id, err := os.Hostname() if err != nil { return err } - log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", c.Namespace, plunderLock, id) - - // we use the Lease lock type since edits to Leases are less common - // and fewer objects in the cluster watch "all Leases". 
- lock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Name: plunderLock, - Namespace: c.Namespace, - }, - Client: sm.KubernetesClient.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - }, - } + log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", c.Namespace, c.LeaseName, id) // use a Go context so we can tell the leaderelection code when we // want to step down @@ -118,13 +110,13 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer * go func() { <-signalChan - log.Info("Received termination, signaling shutdown") + log.Info("Received termination, signaling cluster shutdown") // Cancel the context, which will in turn cancel the leadership cancel() // Cancel the arp context, which will in turn stop any broadcasts }() - // (attempt to) Remove the virtual IP, incase it already exists + // (attempt to) Remove the virtual IP, in case it already exists err = cluster.Network.DeleteIP() if err != nil { log.Errorf("could not delete virtualIP: %v", err) @@ -137,11 +129,11 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer * } }() - // If Packet is enabled then we can begin our preparation work + // If Equinix Metal is enabled then we can begin our preparation work var packetClient *packngo.Client if c.EnableMetal { if c.ProviderConfig != "" { - key, project, err := packet.GetPacketConfig(c.ProviderConfig) + key, project, err := equinixmetal.GetPacketConfig(c.ProviderConfig) if err != nil { log.Error(err) } else { @@ -156,25 +148,108 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer * log.Error(err) } - // We're using Packet with BGP, popuplate the Peer information from the API + // We're using Equinix Metal with BGP, populate the Peer information from the API if c.EnableBGP { - log.Infoln("Looking up the BGP configuration from packet") - err = packet.BGPLookup(packetClient, c) + log.Infoln("Looking up the BGP configuration from Equinix Metal") + err = equinixmetal.BGPLookup(packetClient, c) if err != nil { log.Error(err) } } } - if c.EnableBGP { + if c.EnableBGP && bgpServer == nil { // Lets start BGP log.Info("Starting the BGP server to advertise VIP routes to VGP peers") - bgpServer, err = bgp.NewBGPServer(&c.BGPConfig) + bgpServer, err = bgp.NewBGPServer(&c.BGPConfig, nil) if err != nil { log.Error(err) } } + run := &runConfig{ + config: c, + leaseID: id, + sm: sm, + onStartedLeading: func(ctx context.Context) { + // As we're leading lets start the vip service + err := cluster.vipService(ctxArp, ctxDNS, c, sm, bgpServer, packetClient) + if err != nil { + log.Errorf("Error starting the VIP service on the leader [%s]", err) + } + }, + onStoppedLeading: func() { + // we can do cleanup here + log.Info("This node is becoming a follower within the cluster") + + // Stop the dns context + cancelDNS() + // Stop the Arp context if it is running + cancelArp() + + // Stop the BGP server + if bgpServer != nil { + err := bgpServer.Close() + if err != nil { + log.Warnf("%v", err) + } + } + + err := cluster.Network.DeleteIP() + if err != nil { + log.Warnf("%v", err) + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + onNewLeader: func(identity string) { + // we're notified when new leader elected + log.Infof("Node [%s] is assuming leadership of the cluster", identity) + }, + } + + switch c.LeaderElectionType { + case "kubernetes", "": + cluster.runKubernetesLeaderElectionOrDie(ctx, run) + case "etcd": + 
cluster.runEtcdLeaderElectionOrDie(ctx, run) + default: + log.Info(fmt.Sprintf("LeaderElectionMode %s not supported, exiting", c.LeaderElectionType)) + } + + return nil +} + +type runConfig struct { + config *kubevip.Config + leaseID string + sm *Manager + + // onStartedLeading is called when this member starts leading. + onStartedLeading func(context.Context) + // onStoppedLeading is called when this member stops leading. + onStoppedLeading func() + // onNewLeader is called when the client observes a leader that is + // not the previously observed leader. This includes the first observed + // leader when the client starts. + onNewLeader func(identity string) +} + +func (cluster *Cluster) runKubernetesLeaderElectionOrDie(ctx context.Context, run *runConfig) { + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: run.config.LeaseName, + Namespace: run.config.Namespace, + Annotations: run.config.LeaseAnnotations, + }, + Client: run.sm.KubernetesClient.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: run.leaseID, + }, + } + // start the leader election code loop leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ Lock: lock, @@ -185,59 +260,37 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer * // get elected before your background loop finished, violating // the stated goal of the lease. ReleaseOnCancel: true, - LeaseDuration: time.Duration(c.LeaseDuration) * time.Second, - RenewDeadline: time.Duration(c.RenewDeadline) * time.Second, - RetryPeriod: time.Duration(c.RetryPeriod) * time.Second, + LeaseDuration: time.Duration(run.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(run.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(run.config.RetryPeriod) * time.Second, Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(ctx context.Context) { - // As we're leading lets start the vip service - err = cluster.vipService(ctxArp, ctxDNS, c, sm, bgpServer, packetClient) - if err != nil { - log.Errorf("Error starting the VIP service on the leader [%s]", err) - } - - }, - OnStoppedLeading: func() { - // we can do cleanup here - log.Info("This node is becoming a follower within the cluster") - - // Stop the dns context - cancelDNS() - // Stop the Arp context if it is running - cancelArp() - - // Stop the BGP server - if bgpServer != nil { - err = bgpServer.Close() - if err != nil { - log.Warnf("%v", err) - } - } - - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - - log.Fatal("lost leadership, restarting kube-vip") - }, - OnNewLeader: func(identity string) { - // we're notified when new leader elected - log.Infof("Node [%s] is assuming leadership of the cluster", identity) - }, + OnStartedLeading: run.onStartedLeading, + OnStoppedLeading: run.onStoppedLeading, + OnNewLeader: run.onNewLeader, }, }) +} - return nil +func (cluster *Cluster) runEtcdLeaderElectionOrDie(ctx context.Context, run *runConfig) { + etcd.RunElectionOrDie(ctx, &etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{Client: run.sm.EtcdClient}, + Name: run.config.LeaseName, + MemberID: run.leaseID, + LeaseDurationSeconds: int64(run.config.LeaseDuration), + Callbacks: etcd.LeaderCallbacks{ + OnStartedLeading: run.onStartedLeading, + OnStoppedLeading: run.onStoppedLeading, + OnNewLeader: run.onNewLeader, + }, + }) } -func (sm 
*Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer) error { +func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer, port int) error { // Use a restartable watcher, as this should help in the event of etcd or timeout issues log.Infof("Kube-Vip is watching nodes for control-plane labels") - labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"node-role.kubernetes.io/control-plane": ""}} listOptions := metav1.ListOptions{ - LabelSelector: labels.Set(labelSelector.MatchLabels).String(), + LabelSelector: "node-role.kubernetes.io/control-plane", } rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ @@ -257,7 +310,7 @@ func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer) error { }() ch := rw.ResultChan() - //defer rw.Stop() + // defer rw.Stop() for event := range ch { // We need to inspect the event and get ResourceVersion out of it @@ -265,29 +318,27 @@ func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer) error { case watch.Added, watch.Modified: node, ok := event.Object.(*v1.Node) if !ok { - return fmt.Errorf("Unable to parse Kubernetes Node from Annotation watcher") + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") } - //Find the node IP address (this isn't foolproof) + // Find the node IP address (this isn't foolproof) for x := range node.Status.Addresses { - if node.Status.Addresses[x].Type == v1.NodeInternalIP { - err = lb.AddBackend(node.Status.Addresses[x].Address) + err = lb.AddBackend(node.Status.Addresses[x].Address, port) if err != nil { - log.Errorf("Add IPVS backend [%v]", err) + log.Errorf("add IPVS backend [%v]", err) } } } case watch.Deleted: node, ok := event.Object.(*v1.Node) if !ok { - return fmt.Errorf("Unable to parse Kubernetes Node from Annotation watcher") + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") } - //Find the node IP address (this isn't foolproof) + // Find the node IP address (this isn't foolproof) for x := range node.Status.Addresses { - if node.Status.Addresses[x].Type == v1.NodeInternalIP { - err = lb.AddBackend(node.Status.Addresses[x].Address) + err = lb.RemoveBackend(node.Status.Addresses[x].Address, port) if err != nil { log.Errorf("Del IPVS backend [%v]", err) } @@ -306,7 +357,6 @@ func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer) error { statusErr, ok := errObject.(*apierrors.StatusError) if !ok { log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) - } status := statusErr.ErrStatus @@ -315,7 +365,6 @@ func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer) error { } } - log.Infoln("Exiting Annotations watcher") + log.Infoln("Exiting Node watcher") return nil - } diff --git a/pkg/cluster/service.go b/pkg/cluster/service.go index 6cfada76..d57264ba 100644 --- a/pkg/cluster/service.go +++ b/pkg/cluster/service.go @@ -9,9 +9,9 @@ import ( "time" "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" "github.com/kube-vip/kube-vip/pkg/kubevip" "github.com/kube-vip/kube-vip/pkg/loadbalancer" - "github.com/kube-vip/kube-vip/pkg/packet" "github.com/kube-vip/kube-vip/pkg/vip" "github.com/packethost/packngo" log "github.com/sirupsen/logrus" @@ -48,15 +48,15 @@ func (cluster *Cluster) vipService(ctxArp, ctxDNS context.Context, c *kubevip.Co err = cluster.Network.AddIP() if err != nil { - log.Warnf("%v", err) + log.Fatalf("%v", err) } if c.EnableMetal { - // We're not using Packet with BGP + // We're not using Equinix Metal with BGP if 
!c.EnableBGP { // Attempt to attach the EIP in the standard manner - log.Debugf("Attaching the Packet EIP through the API to this host") - err = packet.AttachEIP(packetClient, c, id) + log.Debugf("Attaching the Equinix Metal EIP through the API to this host") + err = equinixmetal.AttachEIP(packetClient, c, id) if err != nil { log.Error(err) } @@ -78,13 +78,13 @@ func (cluster *Cluster) vipService(ctxArp, ctxDNS context.Context, c *kubevip.Co log.Infof("Starting IPVS LoadBalancer") - lb, err := loadbalancer.NewIPVSLB(c.VIP, c.LoadBalancerPort) + lb, err := loadbalancer.NewIPVSLB(cluster.Network.IP(), c.LoadBalancerPort, c.LoadBalancerForwardingMethod) if err != nil { log.Errorf("Error creating IPVS LoadBalancer [%s]", err) } go func() { - err = sm.NodeWatcher(lb) + err = sm.NodeWatcher(lb, c.Port) if err != nil { log.Errorf("Error watching node labels [%s]", err) } @@ -101,12 +101,13 @@ func (cluster *Cluster) vipService(ctxArp, ctxDNS context.Context, c *kubevip.Co } if c.EnableARP { - //ctxArp, cancelArp = context.WithCancel(context.Background()) + // ctxArp, cancelArp = context.WithCancel(context.Background()) ipString := cluster.Network.IP() + isIPv6 := vip.IsIPv6(ipString) var ndp *vip.NdpResponder - if vip.IsIPv6(ipString) { + if isIPv6 { ndp, err = vip.NewNDPResponder(c.Interface) if err != nil { log.Fatalf("failed to create new NDP Responder") @@ -117,56 +118,35 @@ func (cluster *Cluster) vipService(ctxArp, ctxDNS context.Context, c *kubevip.Co if ndp != nil { defer ndp.Close() } - + log.Infof("Gratuitous Arp broadcast will repeat every 3 seconds for [%s]", ipString) for { select { case <-ctx.Done(): // if cancel() execute return default: - // Ensure the address exists on the interface before attempting to ARP - set, err := cluster.Network.IsSet() - if err != nil { - log.Warnf("%v", err) - } - if !set { - log.Warnf("Re-applying the VIP configuration [%s] to the interface [%s]", ipString, c.Interface) - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) - } - } - - if vip.IsIPv4(ipString) { - // Gratuitous ARP, will broadcast to new MAC <-> IPv4 address - err := vip.ARPSendGratuitous(ipString, c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } else { - // Gratuitous NDP, will broadcast new MAC <-> IPv6 address - err := ndp.SendGratuitous(ipString) - if err != nil { - log.Warnf("%v", err) - } - } + cluster.ensureIPAndSendGratuitous(c.Interface, ndp) } time.Sleep(3 * time.Second) } }(ctxArp) } + + if c.EnableRoutingTable { + err = cluster.Network.AddRoute() + if err != nil { + log.Warnf("%v", err) + } + } + return nil } // StartLoadBalancerService will start a VIP instance and leave it for kube-proxy to handle -func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Server) error { - // Start a kube-vip loadbalancer service - log.Infof("Starting advertising address [%s] with kube-vip", c.VIP) - +func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Server) { // use a Go context so we can tell the arp loop code when we // want to step down //nolint ctxArp, cancelArp := context.WithCancel(context.Background()) - defer cancelArp() cluster.stop = make(chan bool, 1) cluster.completed = make(chan bool, 1) @@ -175,14 +155,19 @@ func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Ser if err != nil { log.Warnf("Attempted to clean existing VIP => %v", err) } - - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) + if c.EnableRoutingTable { + err = cluster.Network.AddRoute() + 
if err != nil { + log.Warnf("%v", err) + } + } else { + err = cluster.Network.AddIP() + if err != nil { + log.Warnf("%v", err) + } } - if c.EnableARP { - //ctxArp, cancelArp = context.WithCancel(context.Background()) + // ctxArp, cancelArp = context.WithCancel(context.Background()) ipString := cluster.Network.IP() @@ -197,41 +182,21 @@ func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Ser if ndp != nil { defer ndp.Close() } + log.Debugf("(svcs) broadcasting ARP update for %s via %s, every %dms", ipString, c.Interface, c.ArpBroadcastRate) for { - select { case <-ctx.Done(): // if cancel() execute + log.Debugf("(svcs) ending ARP update for %s via %s, every %dms", ipString, c.Interface, c.ArpBroadcastRate) return default: - // Ensure the address exists on the interface before attempting to ARP - set, err := cluster.Network.IsSet() - if err != nil { - log.Warnf("%v", err) - } - if !set { - log.Warnf("Re-applying the VIP configuration [%s] to the interface [%s]", ipString, c.Interface) - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) - } - } - - if vip.IsIPv4(ipString) { - // Gratuitous ARP, will broadcast to new MAC <-> IPv4 address - err := vip.ARPSendGratuitous(ipString, c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } else { - // Gratuitous NDP, will broadcast new MAC <-> IPv6 address - err := ndp.SendGratuitous(ipString) - if err != nil { - log.Warnf("%v", err) - } - } + cluster.ensureIPAndSendGratuitous(c.Interface, ndp) } - time.Sleep(3 * time.Second) + if c.ArpBroadcastRate < 500 { + log.Errorf("arp broadcast rate is [%d], this shouldn't be lower that 300ms (defaulting to 3000)", c.ArpBroadcastRate) + c.ArpBroadcastRate = 3000 + } + time.Sleep(time.Duration(c.ArpBroadcastRate) * time.Millisecond) } }(ctxArp) } @@ -239,7 +204,7 @@ func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Ser if c.EnableBGP { // Lets advertise the VIP over BGP, the host needs to be passed using CIDR notation cidrVip := fmt.Sprintf("%s/%s", cluster.Network.IP(), c.VIPCIDR) - log.Debugf("Attempting to advertise the address [%s] over BGP", cidrVip) + log.Debugf("(svcs) attempting to advertise the address [%s] over BGP", cidrVip) err = bgp.AddHost(cidrVip) if err != nil { log.Error(err) @@ -247,25 +212,70 @@ func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Ser } go func() { - //nolint - for { - select { - case <-cluster.stop: - // Stop the Arp context if it is running - cancelArp() - - log.Info("[LOADBALANCER] Stopping load balancers") - log.Infof("[VIP] Releasing the Virtual IP [%s]", c.VIP) - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } + <-cluster.stop + // Stop the Arp context if it is running + cancelArp() - close(cluster.completed) - return + if c.EnableRoutingTable { + err = cluster.Network.DeleteRoute() + if err != nil { + log.Warnf("%v", err) } + + close(cluster.completed) + return + } + + log.Info("[LOADBALANCER] Stopping load balancers") + log.Infof("[VIP] Releasing the Virtual IP [%s]", c.VIP) + err = cluster.Network.DeleteIP() + if err != nil { + log.Warnf("%v", err) } + + close(cluster.completed) }() - log.Infoln("Started Load Balancer and Virtual IP") - return nil +} + +// ensureIPAndSendGratuitous - adds IP to the interface if missing, and send +// either a gratuitous ARP or gratuitous NDP. Re-adds the interface if it is IPv6 +// and in a dadfailed state. 
+func (cluster *Cluster) ensureIPAndSendGratuitous(iface string, ndp *vip.NdpResponder) { + ipString := cluster.Network.IP() + isIPv6 := vip.IsIPv6(ipString) + // Check if IP is dadfailed + if cluster.Network.IsDADFAIL() { + log.Warnf("IP address is in dadfailed state, removing [%s] from interface [%s]", ipString, iface) + err := cluster.Network.DeleteIP() + if err != nil { + log.Warnf("%v", err) + } + } + + // Ensure the address exists on the interface before attempting to ARP + set, err := cluster.Network.IsSet() + if err != nil { + log.Warnf("%v", err) + } + if !set { + log.Warnf("Re-applying the VIP configuration [%s] to the interface [%s]", ipString, iface) + err = cluster.Network.AddIP() + if err != nil { + log.Warnf("%v", err) + } + } + + if isIPv6 { + // Gratuitous NDP, will broadcast new MAC <-> IPv6 address + err := ndp.SendGratuitous(ipString) + if err != nil { + log.Warnf("%v", err) + } + } else { + // Gratuitous ARP, will broadcast to new MAC <-> IPv4 address + err := vip.ARPSendGratuitous(ipString, iface) + if err != nil { + log.Warnf("%v", err) + } + } } diff --git a/pkg/cluster/singleNode.go b/pkg/cluster/singleNode.go index c8440b38..e7d871d3 100644 --- a/pkg/cluster/singleNode.go +++ b/pkg/cluster/singleNode.go @@ -45,23 +45,17 @@ func (cluster *Cluster) StartSingleNode(c *kubevip.Config, disableVIP bool) erro } go func() { - //nolint - for { - select { - case <-cluster.stop: - - if !disableVIP { - - log.Info("[VIP] Releasing the Virtual IP") - err := cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - close(cluster.completed) - return + <-cluster.stop + + if !disableVIP { + + log.Info("[VIP] Releasing the Virtual IP") + err := cluster.Network.DeleteIP() + if err != nil { + log.Warnf("%v", err) } } + close(cluster.completed) }() log.Infoln("Started Load Balancer and Virtual IP") return nil diff --git a/pkg/packet/bgp.go b/pkg/equinixmetal/bgp.go similarity index 92% rename from pkg/packet/bgp.go rename to pkg/equinixmetal/bgp.go index 778055ca..5f113aa6 100644 --- a/pkg/packet/bgp.go +++ b/pkg/equinixmetal/bgp.go @@ -1,4 +1,4 @@ -package packet +package equinixmetal import ( "fmt" @@ -22,7 +22,7 @@ func BGPLookup(c *packngo.Client, k *kubevip.Config) error { thisDevice = findSelf(c, k.MetalProjectID) } if thisDevice == nil { - return fmt.Errorf("Unable to find local/this device in packet API") + return fmt.Errorf("Unable to find local/this device in Equinix Metal API") } fmt.Printf("Querying BGP settings for [%s]", thisDevice.Hostname) @@ -54,6 +54,7 @@ func BGPLookup(c *packngo.Client, k *kubevip.Config) error { Address: neighbours[0].PeerIps[x], AS: uint32(neighbours[0].PeerAs), MultiHop: neighbours[0].Multihop, + Password: neighbours[0].Md5Password, } k.BGPConfig.Peers = append(k.BGPConfig.Peers, peer) } diff --git a/pkg/packet/eip.go b/pkg/equinixmetal/eip.go similarity index 78% rename from pkg/packet/eip.go rename to pkg/equinixmetal/eip.go index 0ecb3558..1db4666a 100644 --- a/pkg/packet/eip.go +++ b/pkg/equinixmetal/eip.go @@ -1,4 +1,4 @@ -package packet +package equinixmetal import ( "fmt" @@ -9,9 +9,8 @@ import ( log "github.com/sirupsen/logrus" ) -// AttachEIP will use the packet APIs to move an EIP and attach to a host -func AttachEIP(c *packngo.Client, k *kubevip.Config, hostname string) error { - +// AttachEIP will use the Equinix Metal APIs to move an EIP and attach to a host +func AttachEIP(c *packngo.Client, k *kubevip.Config, _ string) error { // Use MetalProjectID if it is defined projID := k.MetalProjectID @@ -33,11 +32,10 @@ 
func AttachEIP(c *packngo.Client, k *kubevip.Config, hostname string) error { ips, _, _ := c.ProjectIPs.List(projID, &packngo.ListOptions{}) for _, ip := range ips { - // Find the device id for our EIP if ip.Address == vip { log.Infof("Found EIP ->%s ID -> %s\n", ip.Address, ip.ID) - // If attachements already exist then remove them + // If attachments already exist then remove them if len(ip.Assignments) != 0 { hrefID := path.Base(ip.Assignments[0].Href) _, err := c.DeviceIPs.Unassign(hrefID) @@ -48,10 +46,10 @@ func AttachEIP(c *packngo.Client, k *kubevip.Config, hostname string) error { } } - // Lookup this server through the packet API + // Lookup this server through the Equinix Metal API thisDevice := findSelf(c, projID) if thisDevice == nil { - return fmt.Errorf("unable to find local/this device in packet API") + return fmt.Errorf("unable to find local/this device in Equinix Metal API") } // Assign the EIP to this device diff --git a/pkg/packet/utils.go b/pkg/equinixmetal/utils.go similarity index 89% rename from pkg/packet/utils.go rename to pkg/equinixmetal/utils.go index 6cbe0406..6403fd24 100644 --- a/pkg/packet/utils.go +++ b/pkg/equinixmetal/utils.go @@ -1,9 +1,8 @@ -package packet +package equinixmetal import ( "encoding/json" "fmt" - "io/ioutil" "os" "github.com/packethost/packngo" @@ -38,7 +37,7 @@ func findSelf(c *packngo.Client, projectID string) *packngo.Device { return nil } -//GetPacketConfig will lookup the configuration from a file path +// GetPacketConfig will lookup the configuration from a file path func GetPacketConfig(providerConfig string) (string, string, error) { var config struct { AuthToken string `json:"apiKey"` @@ -46,7 +45,7 @@ func GetPacketConfig(providerConfig string) (string, string, error) { } // get our token and project if providerConfig != "" { - configBytes, err := ioutil.ReadFile(providerConfig) + configBytes, err := os.ReadFile(providerConfig) if err != nil { return "", "", fmt.Errorf("failed to get read configuration file at path %s: %v", providerConfig, err) } diff --git a/pkg/etcd/client.go b/pkg/etcd/client.go new file mode 100644 index 00000000..bc0d7540 --- /dev/null +++ b/pkg/etcd/client.go @@ -0,0 +1,26 @@ +package etcd + +import ( + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/kube-vip/kube-vip/pkg/kubevip" +) + +func NewClient(c *kubevip.Config) (*clientv3.Client, error) { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: c.Etcd.CAFile, + CertFile: c.Etcd.ClientCertFile, + KeyFile: c.Etcd.ClientKeyFile, + } + + clientTLS, err := tlsInfo.ClientConfig() + if err != nil { + return nil, err + } + + return clientv3.New(clientv3.Config{ + Endpoints: c.Etcd.Endpoints, + TLS: clientTLS, + }) +} diff --git a/pkg/etcd/election.go b/pkg/etcd/election.go new file mode 100644 index 00000000..a9b5d425 --- /dev/null +++ b/pkg/etcd/election.go @@ -0,0 +1,226 @@ +package etcd + +import ( + "context" + "hash/fnv" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/concurrency" +) + +// LeaderElectionConfig allows to configure the leader election params. +type LeaderElectionConfig struct { + // EtcdConfig contains the client to connect to the etcd cluster. + EtcdConfig ClientConfig + + // Name uniquely identifies this leader election. All members of the same election + // should use the same value here. 
+ Name string + + // MemberID identifies uniquely this contestant from other in the leader election. + // It will be converted to an int64 using a hash, so theoretically collisions are possible + // when using a string. If you want to guarantee safety, us MemberUniqueID to specify a unique + // int64 directly. + // If two processes start a leader election using the same MemberID, one of them will + // fail. + MemberID string + + // MemberUniqueID is the int equivalent to MemberID that allows to override the default conversion + // from string to int using hashing. + MemberUniqueID *int64 + + // LeaseDurationSeconds is the duration that non-leader candidates will + // wait to force acquire leadership. + // This is just a request to the etcd server but it's not guaranteed, the server + // might decide to make the duration longer. + LeaseDurationSeconds int64 + + // Callbacks are callbacks that are triggered during certain lifecycle + // events of the LeaderElector + Callbacks LeaderCallbacks +} + +// LeaderCallbacks are callbacks that are triggered during certain +// lifecycle events of the election. +type LeaderCallbacks struct { + // OnStartedLeading is called when this member starts leading. + OnStartedLeading func(context.Context) + // OnStoppedLeading is called when this member stops leading. + OnStoppedLeading func() + // OnNewLeader is called when the client observes a leader that is + // not the previously observed leader. This includes the first observed + // leader when the client starts. + OnNewLeader func(identity string) +} + +// ClientConfig contains the client to connect to the etcd cluster. +type ClientConfig struct { + Client *clientv3.Client +} + +// RunElectionOrDie behaves the same way as RunElection but panics if there is an error. +func RunElectionOrDie(ctx context.Context, config *LeaderElectionConfig) { + if err := RunElection(ctx, config); err != nil { + panic(err) + } +} + +// RunElection starts a client with the provided config or panics. +// RunElection blocks until leader election loop is +// stopped by ctx or it has stopped holding the leader lease. 
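To make the shape of this API concrete, a hedged usage sketch of `RunElection` follows; the endpoint, election name and member ID are placeholders, and kube-vip itself supplies a TLS-enabled client built by `etcd.NewClient` rather than the plain client shown here:

```
package main

import (
	"context"
	"log"

	"github.com/kube-vip/kube-vip/pkg/etcd"
	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// Plain (insecure) client for illustration only.
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	cfg := &etcd.LeaderElectionConfig{
		EtcdConfig:           etcd.ClientConfig{Client: cli},
		Name:                 "kube-vip-election", // placeholder election name
		MemberID:             "node-a",            // placeholder member identity
		LeaseDurationSeconds: 5,
		Callbacks: etcd.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { log.Println("started leading") },
			OnStoppedLeading: func() { log.Println("stopped leading") },
			OnNewLeader:      func(id string) { log.Printf("observed leader: %s", id) },
		},
	}

	// Blocks until the context is cancelled or leadership is lost.
	if err := etcd.RunElection(context.Background(), cfg); err != nil {
		log.Fatal(err)
	}
}
```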
+func RunElection(ctx context.Context, config *LeaderElectionConfig) error { + var memberID int64 + if config.MemberUniqueID != nil { + memberID = *config.MemberUniqueID + } else { + h := fnv.New64a() + if _, err := h.Write(append([]byte(config.Name), []byte(config.MemberID)...)); err != nil { + return err + } + memberID = int64(h.Sum64()) + } + + ttl := config.LeaseDurationSeconds + r := &pb.LeaseGrantRequest{TTL: ttl, ID: memberID} + lease, err := clientv3.RetryLeaseClient( + config.EtcdConfig.Client, + ).LeaseGrant(ctx, r) + if err != nil { + return errors.Wrap(err, "creating lease") + } + + leaseID := clientv3.LeaseID(lease.ID) + + s, err := concurrency.NewSession( + config.EtcdConfig.Client, + concurrency.WithTTL(int(lease.TTL)), + concurrency.WithLease(leaseID), + ) + if err != nil { + return err + } + + election := concurrency.NewElection(s, config.Name) + + m := &member{ + client: config.EtcdConfig.Client, + election: election, + callbacks: config.Callbacks, + memberID: config.MemberID, + weAreTheLeader: make(chan struct{}, 1), + leaseTTL: lease.TTL, + } + + go m.tryToBeLeader(ctx) + m.watchLeaderChanges(ctx) + + return nil +} + +type member struct { + key string + client *clientv3.Client + election *concurrency.Election + isLeader bool + currentLeaderKey string + callbacks LeaderCallbacks + memberID string + weAreTheLeader chan struct{} + leaseTTL int64 +} + +func (m *member) watchLeaderChanges(ctx context.Context) { + observeCtx, observeCancel := context.WithCancel(ctx) + defer observeCancel() + changes := m.election.Observe(observeCtx) + +watcher: + for { + select { + case <-ctx.Done(): + break watcher + case <-m.weAreTheLeader: + + m.isLeader = true + m.key = m.election.Key() // by this time, this should already be set, since Campaign has already returned + log.Debugf("[%s] Marking self as leader with key %s\n", m.memberID, m.key) + case response := <-changes: + log.Debugf("[%s] Leader Changes: %+v\n", m.memberID, response) + if len(response.Kvs) == 0 { + // There is a race condition where just after we stop being the leader + // if there are no more leaders, we might get a response with no key-values + // just before the response channel is closed or the context is cancel + // In that case, just continue and let one of those two things happen + continue + } + newLeaderKey := response.Kvs[0].Key + if m.isLeader && m.key != string(newLeaderKey) { + // We stopped being leaders + + // exit the loop, so we cancel the observe context so we stop watching + // for new leaders. That will close the channel and make this function exit, + // which also makes the routine to finish and RunElection returns + break watcher + } + + if m.currentLeaderKey != string(newLeaderKey) { + // we observed a leader, this could be us or someone else + m.currentLeaderKey = string(newLeaderKey) + m.callbacks.OnNewLeader(string(response.Kvs[0].Value)) + } + } + } + + // If we are here, either we have stopped being leaders or we lost the watcher + // Make sure we call OnStoppedLeading if we were the leader. 
+ if m.isLeader { + m.callbacks.OnStoppedLeading() + } + + log.Debugf("[%s] Exiting watcher\n", m.memberID) +} + +func (m *member) tryToBeLeader(ctx context.Context) { + if err := m.election.Campaign(ctx, m.memberID); err != nil { + log.Errorf("Failed trying to become the leader: %s", err) + // Resign just in case we acquired leadership just before failing + if err := m.election.Resign(m.client.Ctx()); err != nil { + log.Warnf("Failed to resign after we failed becoming the leader, this might not be a problem if we were never the leader: %s", err) + } + return + // TODO: what to do here? + // We probably want watchLeaderChanges to exit as well, since Run + // is expecting us to try to become the leader, but if we are here, + // we won't. So if we don't panic, we need to signal it somehow + } + + // Inform the observer that we are the leader as soon as possible, + // so it can detect if we stop being it + m.weAreTheLeader <- struct{}{} + + // Once we are the leader, start the routine to resign if context is canceled + go m.resignOnCancel(ctx) + + // After becoming the leader, we wait for at least a lease TTL to wait for + // the previous leader to detect the new leadership (if there was one) and + // stop its processes + // TODO: is this too cautious? + log.Debugf("[%s] Waiting %d seconds before running OnStartedLeading", m.memberID, m.leaseTTL) + time.Sleep(time.Second * time.Duration(m.leaseTTL)) + + // We are the leader, execute our code + m.callbacks.OnStartedLeading(ctx) + + // Here the routine dies if OnStartedLeading doesn't block, there is nothing else to do +} + +func (m *member) resignOnCancel(ctx context.Context) { + <-ctx.Done() + if err := m.election.Resign(m.client.Ctx()); err != nil { + log.Errorf("Failed to resign after the context was canceled: %s", err) + } +} diff --git a/pkg/etcd/election_test.go b/pkg/etcd/election_test.go new file mode 100644 index 00000000..1ecf0e54 --- /dev/null +++ b/pkg/etcd/election_test.go @@ -0,0 +1,169 @@ +//go:build integration +// +build integration + +package etcd_test + +import ( + "context" + "log" + "math/rand" + "sync" + "testing" + "time" + + "github.com/kube-vip/kube-vip/pkg/etcd" + . 
"github.com/onsi/gomega" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" +) + +func TestRunElectionWithMemberIDCollision(t *testing.T) { + t.Parallel() + g := NewWithT(t) + ctx := context.Background() + cli := client(g) + defer cli.Close() + + electionName := randomElectionNameForTest("memberIDConflict") + log.Printf("Election name %s\n", electionName) + memberCtx, cancelMember1 := context.WithCancel(ctx) + config := &etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{ + Client: cli, + }, + Name: electionName, + MemberID: "my-host", + LeaseDurationSeconds: 1, + Callbacks: etcd.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + log.Println("I'm the leader!!!!") + log.Println("Renouncing as leader by canceling context") + cancelMember1() + }, + OnNewLeader: func(identity string) { + log.Printf("New leader: %s\n", identity) + }, + OnStoppedLeading: func() { + log.Println("I'm not the leader anymore") + }, + }, + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + g.Expect(etcd.RunElection(memberCtx, config)).To(Succeed()) + }() + + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 50) // make sure the first one becomes leader + g.Expect(etcd.RunElection(ctx, config)).Should(MatchError(ContainSubstring("creating lease"))) + }() + + wg.Wait() +} + +func TestRunElectionWithTwoMembersAndReelection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + ctx := context.Background() + cli := client(g) + defer cli.Close() + + cliMember1 := client(g) + defer cliMember1.Close() + + electionName := randomElectionNameForTest("steppingDown") + configBase := etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{ + Client: cli, + }, + Name: electionName, + LeaseDurationSeconds: 1, + } + + member1Ctx, _ := context.WithCancel(ctx) + member2Ctx, cancelMember2 := context.WithCancel(ctx) + + config1 := configBase + config1.EtcdConfig.Client = cliMember1 + config1.MemberID = "my-host" + uniqueID := rand.Int63() + config1.MemberUniqueID = &uniqueID + config1.Callbacks = baseCallbacksForName(config1.MemberID) + config1.Callbacks.OnStartedLeading = func(_ context.Context) { + log.Println("I'm my-host, the new leader!!!!") + log.Println("Loosing the leadership on purpose by stopping renewing the lease") + g.Expect(cliMember1.Lease.Close()).To(Succeed()) + log.Println("Member1 leases closed") + } + + config2 := configBase + config2.MemberID = "my-other-host" + config2.Callbacks = baseCallbacksForName(config2.MemberID) + config2.Callbacks.OnStartedLeading = func(_ context.Context) { + log.Println("I'm my-other-host, the new leader!!!!") + log.Println("Renouncing as leader by canceling context") + cancelMember2() + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + g.Expect(etcd.RunElection(member1Ctx, &config1)).To(Succeed()) + log.Println("Member1 routine done") + }() + + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 50) // Make sure member1 becomes leader + g.Expect(etcd.RunElection(member2Ctx, &config2)).To(Succeed()) + log.Println("Member2 routine done") + }() + + wg.Wait() +} + +func baseCallbacksForName(name string) etcd.LeaderCallbacks { + return etcd.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + log.Printf("[%s] I'm the new leader!!!!\n", name) + }, + OnNewLeader: func(identity string) { + log.Printf("[%s] New leader: %s\n", name, identity) + }, + OnStoppedLeading: func() { + log.Printf("[%s] I'm not the leader anymore\n", name) + }, + } +} + +func 
randomElectionNameForTest(name string) string { + return name + "-" + randomString(6) +} + +const charSet = "0123456789abcdefghijklmnopqrstuvwxyz" + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + +func randomString(n int) string { + result := make([]byte, n) + for i := range result { + result[i] = charSet[rnd.Intn(len(charSet))] + } + return string(result) +} + +func client(g Gomega) *clientv3.Client { + c, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379"}, + Logger: zap.NewNop(), + }) + g.Expect(err).NotTo(HaveOccurred()) + return c +} diff --git a/pkg/etcd/etcd_suite_test.go b/pkg/etcd/etcd_suite_test.go new file mode 100644 index 00000000..0842d845 --- /dev/null +++ b/pkg/etcd/etcd_suite_test.go @@ -0,0 +1,153 @@ +//go:build integration +// +build integration + +package etcd_test + +import ( + "context" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + etcdVersion = "v3.5.9" + etcdBinDir = "bin" + etcdBinPath = etcdBinDir + "/etcd" + downloadURL = "https://storage.googleapis.com/etcd" + tmpDownloadFile = "etcd.tar.gz" + pidFile = "etcd.pid" +) + +func TestMain(m *testing.M) { + logrus.SetLevel(logrus.DebugLevel) + ctx := context.Background() + expectSuccess(startEtcd(ctx), "starting etcd") + + os.Exit(runTestsWithCleanup(m, func() { + expectSuccess(stopEtcd(), "stopping etcd") + })) +} + +func runTestsWithCleanup(m *testing.M, cleanup func()) int { + defer cleanup() + return m.Run() +} + +func expectSuccess(err error, msg string) { + if err != nil { + log.Fatalf("%s: %s\n", msg, err) + } +} + +func startEtcd(ctx context.Context) error { + if _, err := os.Stat(pidFile); err == nil { + log.Println("Etcd already running, reusing") + return nil + } + + etcdPath, err := installEtcd(ctx) + if err != nil { + errors.Wrap(err, "installing etcd for tests") + } + + etcdCmd := exec.Command(etcdPath, "--data-dir", "./etcd-data") + if os.Getenv("ETCD_SERVER_LOGS") == "true" { + log.Println("Enabling etcd server logs") + etcdCmd.Stdout = os.Stdout + etcdCmd.Stderr = os.Stderr + } + log.Println("Starting etcd") + if err := etcdCmd.Start(); err != nil { + errors.Wrap(err, "starting etcd for tests") + } + + if err := os.WriteFile(pidFile, []byte(strconv.Itoa(etcdCmd.Process.Pid)), 0o600); err != nil { + return err + } + + log.Println("Waiting for etcd to be up") + time.Sleep(time.Second) + + return nil +} + +func installEtcd(ctx context.Context) (string, error) { + projectRoot, err := filepath.Abs("../../") + if err != nil { + return "", err + } + binDir := filepath.Join(projectRoot, etcdBinDir) + etcdPath := filepath.Join(projectRoot, etcdBinPath) + + if _, err := os.Stat(etcdPath); err == nil { + log.Println("Etcd already installed, skipping") + return etcdPath, nil + } + + if err := os.MkdirAll(binDir, 0o755); err != nil { + return "", err + } + + download := fmt.Sprintf("%s/%s/etcd-%s-linux-amd64.tar.gz", downloadURL, etcdVersion, etcdVersion) + + // Hacky to run bash, but simplifies this code a lot + cmd := fmt.Sprintf("curl -sL %s | tar -xzvf - -C %s --strip-components=1", download, binDir) + out, err := exec.CommandContext(ctx, "bash", "-c", cmd).CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "downloading etcd: %s", string(out)) + } + + return etcdPath, nil +} + +func stopEtcd() error { + if os.Getenv("REUSE_ETCD") == "true" { + log.Println("REUSE_ETCD=true, leaving etcd running") + return nil + } + + if _, err := 
os.Stat(pidFile); os.IsNotExist(err) { + log.Println("Etcd pid file doesn't exit, skipping cleanup") + return nil + } + + dat, err := os.ReadFile(pidFile) + if err != nil { + return err + } + pid, err := strconv.Atoi(string(dat)) + if err != nil { + return err + } + + etcdProcess, err := os.FindProcess(pid) + if err != nil { + return err + } + + log.Println("Stopping etcd") + if err := etcdProcess.Kill(); err != nil { + return errors.Wrap(err, "Failed stopping etcd") + } + + log.Println("Deleting etcd data") + if err := os.RemoveAll("./etcd-data"); err != nil { + return errors.Wrap(err, "deleting etcd data") + } + + log.Println("Deleting etcd pid file") + if err := os.RemoveAll(pidFile); err != nil { + return errors.Wrap(err, "deleting pid file") + } + + return nil +} diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go new file mode 100644 index 00000000..adb1ca05 --- /dev/null +++ b/pkg/iptables/iptables.go @@ -0,0 +1,737 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + if e.ExitStatus() != 1 { + return false + } + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" + return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +const ( + TableFilter = "filter" + ChainInput = "INPUT" +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + waitSupportSecond bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever + + nftables bool +} + +// Stat represents a structured statistic entry. 
+type Stat struct { + Packets uint64 `json:"pkts"` + Bytes uint64 `json:"bytes"` + Target string `json:"target"` + Protocol string `json:"prot"` + Opt string `json:"opt"` + Input string `json:"in"` + Output string `json:"out"` + Source *net.IPNet `json:"source"` + Destination *net.IPNet `json:"destination"` + Options string `json:"options"` +} + +type Option func(*IPTables) + +func IPFamily(proto Protocol) Option { + return func(ipt *IPTables) { + ipt.proto = proto + } +} + +func Timeout(timeout int) Option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +func EnableNFTables(enable bool) Option { + return func(ipt *IPTables) { + ipt.nftables = enable + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...Option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto, ipt.nftables)) + if err != nil { + return nil, err + } + ipt.path = path + + vstring, err := getIptablesVersionString(path) + if err != nil { + return nil, fmt.Errorf("could not get iptables version: %v", err) + } + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + if err != nil { + return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) + } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode + + checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.waitSupportSecond = waitSupportSecond + ipt.hasRandomFully = randomFullyPresent + + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) +} + +// InsertUnique acts like Insert except that it won't insert a duplicate (no matter the position in the chain) +func (ipt *IPTables) InsertUnique(table, chain string, pos int, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Insert(table, chain, pos, rulespec...) 
+ } + + return nil +} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) + } + return err +} + +// List rules in specified table/chain +func (ipt *IPTables) ListByID(table, chain string, id int) (string, error) { + args := []string{"-t", table, "-S", chain, strconv.Itoa(id)} + rule, err := ipt.executeList(args) + if err != nil { + return "", err + } + return rule[0], nil +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. 
+ // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 { + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. + options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +// ParseStat parses a single statistic row into a Stat struct. The input should +// be a string slice that is returned from calling the Stat method. 
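Most callers are expected to go through `StructuredStats`, which chains `Stats` and `ParseStat`; a small sketch, using the table and chain constants defined in this package:

```
package main

import (
	"fmt"
	"log"

	"github.com/kube-vip/kube-vip/pkg/iptables"
)

func main() {
	// Defaults to IPv4 with no lock timeout.
	ipt, err := iptables.New()
	if err != nil {
		log.Fatal(err)
	}

	// StructuredStats returns typed per-rule counters for the given table/chain.
	stats, err := ipt.StructuredStats(iptables.TableFilter, iptables.ChainInput)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range stats {
		fmt.Printf("%s -> %s: %d packets, %d bytes\n", s.Source, s.Destination, s.Packets, s.Bytes)
	}
}
```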
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { + // For forward-compatibility, expect at least 10 fields in the stat + if len(stat) < 10 { + return parsed, fmt.Errorf("stat contained fewer fields than expected") + } + + // Convert the fields that are not plain strings + parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse packets") + } + parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse bytes") + } + _, parsed.Source, err = net.ParseCIDR(stat[7]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse source") + } + _, parsed.Destination, err = net.ParseCIDR(stat[8]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse destination") + } + + // Put the fields that are strings + parsed.Target = stat[2] + parsed.Protocol = stat[3] + parsed.Opt = stat[4] + parsed.Input = stat[5] + parsed.Output = stat[6] + parsed.Options = stat[9] + + return parsed, nil +} + +// StructuredStats returns statistics as structured data which may be further +// parsed and marshaled. +func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { + rawStats, err := ipt.Stats(table, chain) + if err != nil { + return nil, err + } + + structStats := []Stat{} + for _, rawStat := range rawStats { + stat, err := ipt.ParseStat(rawStat) + if err != nil { + return nil, err + } + structStats = append(structStats, stat) + } + + return structStats, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +const existsErr = 1 + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. 
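
ClearChain, ChainExists and StructuredStats combine naturally when managing a dedicated chain: ClearChain creates or flushes the chain, and StructuredStats wraps Stats and ParseStat to return typed per-rule counters. A minimal sketch follows, assuming the caller already holds an *IPTables handle; the dumpChainStats helper and the chain names are illustrative only.

package example

import (
	"fmt"

	"github.com/kube-vip/kube-vip/pkg/iptables"
)

// dumpChainStats sketches chain lifecycle handling plus structured statistics.
func dumpChainStats(ipt *iptables.IPTables, table, chain string) error {
	// ClearChain creates the chain if needed, or flushes it when it already exists.
	if err := ipt.ClearChain(table, chain); err != nil {
		return err
	}

	ok, err := ipt.ChainExists(table, chain)
	if err != nil {
		return err
	}
	fmt.Printf("chain %s/%s present: %v\n", table, chain, ok)

	// StructuredStats returns one Stat per rule, with counters and CIDRs parsed.
	stats, err := ipt.StructuredStats(table, chain)
	if err != nil {
		return err
	}
	for _, s := range stats {
		fmt.Printf("%s -> %s target=%s pkts=%d bytes=%d\n",
			s.Source, s.Destination, s.Target, s.Packets, s.Bytes)
	}
	return nil
}
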
+// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + +func (ipt *IPTables) ClearAll() error { + return ipt.run("-F") +} + +func (ipt *IPTables) DeleteAll() error { + return ipt.run("-X") +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) + if ipt.hasWait { + args = append(args, "--wait") + if ipt.timeout != 0 && ipt.waitSupportSecond { + args = append(args, strconv.Itoa(ipt.timeout)) + } + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + syscall.Close(fmu.fd) + return err + } + defer func() { + _ = ul.Unlock() + }() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol, nftables bool) string { + if proto == ProtocolIPv6 { + if nftables { + return "ip6tables-nft" + } + return "ip6tables-legacy" + } + + if nftables { + return "iptables-nft" + } + return "iptables-legacy" +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { //nolint + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +// Checks if an iptablse version is after 1.6.0, when --wait support second +func iptablesWaitSupportSecond(v1 int, v2 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 >= 6 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. 
+func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} + +func GetIPTablesRuleSpecification(rule, specification string) string { + parts := strings.Split(rule, " ") + for i, part := range parts { + if part == specification && i+1 < len(parts) { + return parts[i+1] + } + } + + return "" +} diff --git a/pkg/iptables/lock.go b/pkg/iptables/lock.go new file mode 100644 index 00000000..11c08aac --- /dev/null +++ b/pkg/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (n nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. It +// also unlocks the associated mutex. 
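
Because extractIptablesVersion and the capability predicates are unexported, the most direct way to illustrate them, together with the exported GetIPTablesRuleSpecification token scanner, is an in-package test. The sketch below is a hypothetical test file, with illustrative version strings and rule text.

package iptables

import "testing"

// Hypothetical illustration of the version parsing and rule-token helpers.
func TestVersionAndRuleHelpersSketch(t *testing.T) {
	v1, v2, v3, mode, err := extractIptablesVersion("iptables v1.8.7 (nf_tables)")
	if err != nil {
		t.Fatal(err)
	}
	if v1 != 1 || v2 != 8 || v3 != 7 || mode != "nf_tables" {
		t.Errorf("unexpected parse result: %d.%d.%d (%s)", v1, v2, v3, mode)
	}

	// Without a bracketed mode suffix the parser falls back to "legacy".
	if _, _, _, mode, err = extractIptablesVersion("iptables v1.4.21"); err != nil || mode != "legacy" {
		t.Errorf("expected legacy mode, got %q (err: %v)", mode, err)
	}

	// --wait appeared in 1.4.20, so 1.4.19 must report false.
	if !iptablesHasWaitCommand(1, 4, 20) || iptablesHasWaitCommand(1, 4, 19) {
		t.Error("unexpected --wait capability result")
	}

	// GetIPTablesRuleSpecification returns the token following the requested
	// specification, or "" when it is absent.
	rule := "-A KUBE-VIP-EGRESS -s 10.0.0.50/32 -j SNAT --to-source 192.168.0.10"
	if got := GetIPTablesRuleSpecification(rule, "--to-source"); got != "192.168.0.10" {
		t.Errorf("unexpected token: %q", got)
	}
	if got := GetIPTablesRuleSpecification(rule, "--dport"); got != "" {
		t.Errorf("expected empty string, got %q", got)
	}
}
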
+func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go index 9be3848f..2414ef1d 100644 --- a/pkg/k8s/client.go +++ b/pkg/k8s/client.go @@ -31,8 +31,19 @@ func NewClientset(configPath string, inCluster bool, hostname string) (*kubernet } func restConfig(kubeconfig string, inCluster bool) (*rest.Config, error) { + cfg, err := rest.InClusterConfig() + if kubeconfig != "" && !inCluster { - return clientcmd.BuildConfigFromFlags("", kubeconfig) + cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) } - return rest.InClusterConfig() + + if err != nil { + return nil, err + } + + // Override some of the defaults allowing a little bit more flexibility speaking with the API server + // these should hopefully be redundant, however issues will still be logged. + cfg.QPS = 100 + cfg.Burst = 250 + return cfg, nil } diff --git a/pkg/kubevip/config_endpoints.go b/pkg/kubevip/config_endpoints.go deleted file mode 100644 index c9717319..00000000 --- a/pkg/kubevip/config_endpoints.go +++ /dev/null @@ -1,71 +0,0 @@ -package kubevip - -import ( - "fmt" - "net/url" - "strconv" - - log "github.com/sirupsen/logrus" -) - -func init() { - // Start the index negative as it will be incrememnted of first approach - endPointIndex = -1 -} - -// ValidateBackEndURLS will run through the endpoints and ensure that they're a valid URL -func ValidateBackEndURLS(endpoints *[]BackEnd) error { - - for i := range *endpoints { - log.Debugf("Parsing [%s]", (*endpoints)[i].RawURL) - u, err := url.Parse((*endpoints)[i].RawURL) - if err != nil { - return err - } - - // No error is returned if the prefix/schema is missing - // If the Host is empty then we were unable to parse correctly (could be prefix is missing) - if u.Host == "" { - return fmt.Errorf("Unable to parse [%s], ensure it's prefixed with http(s)://", (*endpoints)[i].RawURL) - } - (*endpoints)[i].Address = u.Hostname() - // if a port is specified then update the internal endpoint stuct, if not rely on the schema - if u.Port() != "" { - portNum, err := strconv.Atoi(u.Port()) - if err != nil { - return err - } - (*endpoints)[i].Port = portNum - } - (*endpoints)[i].ParsedURL = u - } - return nil -} - -// ReturnEndpointAddr - returns an endpoint -func (lb LoadBalancer) ReturnEndpointAddr() (string, error) { - if len(lb.Backends) == 0 { - return "", fmt.Errorf("No Backends configured") - } - if endPointIndex < len(lb.Backends)-1 { - endPointIndex++ - } else { - // reset the index to the beginning - endPointIndex = 0 - } - // TODO - weighting, decision algorythmn - return fmt.Sprintf("%s:%d", lb.Backends[endPointIndex].Address, lb.Backends[endPointIndex].Port), nil -} - -// ReturnEndpointURL - returns an endpoint -func (lb LoadBalancer) ReturnEndpointURL() *url.URL { - - if endPointIndex != len(lb.Backends)-1 { - endPointIndex++ - } else { - // reset the index to the beginning - endPointIndex = 0 - } - // TODO - weighting, decision algorythmn - return lb.Backends[endPointIndex].ParsedURL -} diff --git a/pkg/kubevip/config_environment.go b/pkg/kubevip/config_environment.go new file mode 100644 index 00000000..fd2c48a6 --- /dev/null +++ b/pkg/kubevip/config_environment.go @@ -0,0 +1,495 @@ +package 
kubevip + +import ( + "encoding/json" + "os" + "strconv" + + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/detector" +) + +// ParseEnvironment - will popultate the configuration from environment variables +func ParseEnvironment(c *Config) error { + if c == nil { + return nil + } + // Ensure that logging is set through the environment variables + env := os.Getenv(vipLogLevel) + // Set default value + if env == "" { + env = "4" + } + + if env != "" { + logLevel, err := strconv.ParseUint(env, 10, 32) + if err != nil { + panic("Unable to parse environment variable [vip_loglevel], should be int") + } + c.Logging = int(logLevel) + } + + // Find interface + env = os.Getenv(vipInterface) + if env != "" { + c.Interface = env + } + + // Find (services) interface + env = os.Getenv(vipServicesInterface) + if env != "" { + c.ServicesInterface = env + } + + // Find provider configuration + env = os.Getenv(providerConfig) + if env != "" { + c.ProviderConfig = env + } + + // Find Kubernetes Leader Election configuration + env = os.Getenv(vipLeaderElection) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableLeaderElection = b + } + + // Attempt to find the Lease name from the environment variables + env = os.Getenv(vipLeaseName) + if env != "" { + c.LeaseName = env + } + + // Attempt to find the Lease configuration from the environment variables + env = os.Getenv(vipLeaseDuration) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.LeaseDuration = int(i) + } + + env = os.Getenv(vipRenewDeadline) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RenewDeadline = int(i) + } + + env = os.Getenv(vipRetryPeriod) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RetryPeriod = int(i) + } + + // Attempt to find the Lease annotations from the environment variables + env = os.Getenv(vipLeaseAnnotations) + if env != "" { + err := json.Unmarshal([]byte(env), &c.LeaseAnnotations) + if err != nil { + return err + } + } + + // Find vip address + env = os.Getenv(vipAddress) + if env != "" { + // TODO - parse address net.Host() + c.VIP = env + // } else { + // c.VIP = os.Getenv(address) + } + + // Find address + env = os.Getenv(address) + if env != "" { + // TODO - parse address net.Host() + c.Address = env + } + + // Find vip port + env = os.Getenv(port) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.Port = int(i) + } + + // Find vipDdns + env = os.Getenv(vipDdns) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.DDNS = b + } + + // Find the namespace that the control plane should use (for leaderElection lock) + env = os.Getenv(cpNamespace) + if env != "" { + c.Namespace = env + } + + // Find controlplane toggle + env = os.Getenv(cpEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableControlPlane = b + } + + // Find Services toggle + env = os.Getenv(svcEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServices = b + + // Find Services leader Election + env = os.Getenv(svcElection) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServicesElection = b + } + + // Find load-balancer class only + env = os.Getenv(lbClassOnly) + if env != "" { + b, err := 
strconv.ParseBool(env) + if err != nil { + return err + } + c.LoadBalancerClassOnly = b + } + + // Load-balancer class name + env = os.Getenv(lbClassName) + if env != "" { + c.LoadBalancerClassName = env + } + + // Find the namespace that the control plane should use (for leaderElection lock) + env = os.Getenv(svcNamespace) + if env != "" { + c.ServiceNamespace = env + } + + // Gets the leaseName for services in arp mode + env = os.Getenv(svcLeaseName) + if env != "" { + c.ServicesLeaseName = env + } + } + + // Find vip address cidr range + env = os.Getenv(vipCidr) + if env != "" { + c.VIPCIDR = env + } + + // Find vip address subnet + env = os.Getenv(vipSubnet) + if env != "" { + c.VIPSubnet = env + } + + // Find Single Node + env = os.Getenv(vipSingleNode) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.SingleNode = b + } + + // Find annotation configuration + env = os.Getenv(annotations) + if env != "" { + c.Annotations = env + } + + // Find Start As Leader + // TODO - does this need deprecating? + // Required when the host sets itself as leader before the state change + env = os.Getenv(vipStartLeader) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.StartAsLeader = b + } + + // Find if ARP is enabled + env = os.Getenv(vipArp) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableARP = b + } + + // Find if ARP is enabled + env = os.Getenv(vipArpRate) + if env != "" { + i64, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.ArpBroadcastRate = i64 + } else { + // default to three seconds + c.ArpBroadcastRate = 3000 + } + + // Wireguard Mode + env = os.Getenv(vipWireguard) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableWireguard = b + } + + // Routing Table Mode + env = os.Getenv(vipRoutingTable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableRoutingTable = b + } + + // Routing Table ID + env = os.Getenv(vipRoutingTableID) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RoutingTableID = int(i) + } + + // Routing Table Type + env = os.Getenv(vipRoutingTableType) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RoutingTableType = int(i) + } + + // BGP Server options + env = os.Getenv(bgpEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableBGP = b + } + + // BGP Router interface determines an interface that we can use to find an address for + env = os.Getenv(bgpRouterInterface) + if env != "" { + _, address, err := detector.FindIPAddress(env) + if err != nil { + return err + } + c.BGPConfig.RouterID = address + } + + // RouterID + env = os.Getenv(bgpRouterID) + if env != "" { + c.BGPConfig.RouterID = env + } + + // AS + env = os.Getenv(bgpRouterAS) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPConfig.AS = uint32(u64) + } + + // Peer AS + env = os.Getenv(bgpPeerAS) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPPeerConfig.AS = uint32(u64) + } + + // Peer AS + env = os.Getenv(bgpPeers) + if env != "" { + peers, err := bgp.ParseBGPPeerConfig(env) + if err != nil { + return err + } + c.BGPConfig.Peers = peers + } + + // BGP Peer mutlihop + env = os.Getenv(bgpMultiHop) + if env != "" { + 
b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.BGPPeerConfig.MultiHop = b + } + + // BGP Peer password + env = os.Getenv(bgpPeerPassword) + if env != "" { + c.BGPPeerConfig.Password = env + } + + // BGP Source Interface + env = os.Getenv(bgpSourceIF) + if env != "" { + c.BGPConfig.SourceIF = env + } + + // BGP Source Address + env = os.Getenv(bgpSourceIP) + if env != "" { + c.BGPConfig.SourceIP = env + } + + // BGP Peer options, add them if relevant + env = os.Getenv(bgpPeerAddress) + if env != "" { + c.BGPPeerConfig.Address = env + // If we've added in a peer configuration, then we should add it to the BGP configuration + c.BGPConfig.Peers = append(c.BGPConfig.Peers, c.BGPPeerConfig) + } + + // Enable the Equinix Metal API calls + env = os.Getenv(vipPacket) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableMetal = b + } + + // Find the Equinix Metal project name + env = os.Getenv(vipPacketProject) + if env != "" { + // TODO - parse address net.Host() + c.MetalProject = env + } + + // Find the Equinix Metal project ID + env = os.Getenv(vipPacketProjectID) + if env != "" { + // TODO - parse address net.Host() + c.MetalProjectID = env + } + + // Enable the load-balancer + env = os.Getenv(lbEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableLoadBalancer = b + } + + // Find loadbalancer port + env = os.Getenv(lbPort) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.LoadBalancerPort = int(i) + } + + // Find loadbalancer forwarding method + env = os.Getenv(lbForwardingMethod) + if env != "" { + c.LoadBalancerForwardingMethod = env + } + + env = os.Getenv(EnableServiceSecurity) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServiceSecurity = b + } + + // Find if node labeling is enabled + env = os.Getenv(EnableNodeLabeling) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableNodeLabeling = b + } + + // Find Prometheus configuration + env = os.Getenv(prometheusServer) + if env != "" { + c.PrometheusHTTPServer = env + } + + // Set Egress configuration(s) + env = os.Getenv(egressPodCidr) + if env != "" { + c.EgressPodCidr = env + } + + env = os.Getenv(egressServiceCidr) + if env != "" { + c.EgressServiceCidr = env + } + + // if this is set then we're enabling nftables + env = os.Getenv(egressWithNftables) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EgressWithNftables = b + } + + return nil +} diff --git a/pkg/kubevip/config_envvar.go b/pkg/kubevip/config_envvar.go index d8481496..3f4a82be 100644 --- a/pkg/kubevip/config_envvar.go +++ b/pkg/kubevip/config_envvar.go @@ -3,39 +3,60 @@ package kubevip // Environment variables const ( - //vipArp - defines if the arp broadcast should be enabled + // vipArp - defines if the arp broadcast should be enabled vipArp = "vip_arp" - //vipLeaderElection - defines if the kubernetes algorithm should be used + // vip_arpRate - defines the rate of gARP broadcasts + vipArpRate = "vip_arpRate" + + // vipLeaderElection - defines if the kubernetes algorithm should be used vipLeaderElection = "vip_leaderelection" - //vipLeaderElection - defines if the kubernetes algorithm should be used + // vipLeaseName - defines the name of the lease lock + vipLeaseName = "vip_leasename" + + // vipLeaderElection - defines if the kubernetes algorithm should be used 
vipLeaseDuration = "vip_leaseduration" - //vipLeaderElection - defines if the kubernetes algorithm should be used + // vipLeaderElection - defines if the kubernetes algorithm should be used vipRenewDeadline = "vip_renewdeadline" - //vipLeaderElection - defines if the kubernetes algorithm should be used + // vipLeaderElection - defines if the kubernetes algorithm should be used vipRetryPeriod = "vip_retryperiod" - //vipLogLevel - defines the level of logging to produce (5 being the most verbose) + // vipLeaderElection - defines the annotations given to the lease lock + vipLeaseAnnotations = "vip_leaseannotations" + + // vipLogLevel - defines the level of logging to produce (5 being the most verbose) vipLogLevel = "vip_loglevel" - //vipInterface - defines the interface that the vip should bind too + // vipInterface - defines the interface that the vip should bind too vipInterface = "vip_interface" - //vipServicesInterface - defines the interface that the service vips should bind too + // vipServicesInterface - defines the interface that the service vips should bind too vipServicesInterface = "vip_servicesinterface" - //vipCidr - defines the cidr that the vip will use + // vipCidr - defines the cidr that the vip will use (for BGP) vipCidr = "vip_cidr" + // vipSubnet - defines the subnet that the vip will use + vipSubnet = "vip_subnet" + + // egressPodCidr - defines the cidr that egress will ignore + egressPodCidr = "egress_podcidr" + + // egressServiceCidr - defines the cidr that egress will ignore + egressServiceCidr = "egress_servicecidr" + + // egressWithNftables - enables using nftables over iptables + egressWithNftables = "egress_withnftables" + ///////////////////////////////////// // TO DO: // Determine how to tidy this mess up ///////////////////////////////////// - //vipAddress - defines the address that the vip will expose + // vipAddress - defines the address that the vip will expose // DEPRECATED: will be removed in a next release vipAddress = "vip_address" @@ -44,71 +65,115 @@ const ( // kube-vip will try to resolve it and use the IP as a VIP address = "address" - //port - defines the port for the VIP + // port - defines the port for the VIP port = "port" // annotations annotations = "annotation" - //vipDdns - defines if use dynamic dns to allocate IP for "address" + // vipDdns - defines if use dynamic dns to allocate IP for "address" vipDdns = "vip_ddns" - //vipSingleNode - defines the vip start as a single node cluster + // vipSingleNode - defines the vip start as a single node cluster vipSingleNode = "vip_singlenode" - //vipStartLeader - will start this instance as the leader of the cluster + // vipStartLeader - will start this instance as the leader of the cluster vipStartLeader = "vip_startleader" - //vipPacket defines that the packet API will be used for EIP + // vipPacket defines that the packet API will be used for EIP vipPacket = "vip_packet" - //vipPacketProject defines which project within Packet to use + // vipPacketProject defines which project within Packet to use vipPacketProject = "vip_packetproject" - //vipPacketProjectID defines which projectID within Packet to use + // vipPacketProjectID defines which projectID within Packet to use vipPacketProjectID = "vip_packetprojectid" - //providerConfig defines a path to a configuration that should be parsed + // providerConfig defines a path to a configuration that should be parsed providerConfig = "provider_config" - //bgpEnable defines if BGP should be enabled + // bgpEnable defines if BGP should be enabled bgpEnable = 
"bgp_enable" - //bgpRouterID defines the routerID for the BGP server + // bgpRouterID defines the routerID for the BGP server bgpRouterID = "bgp_routerid" - //bgpRouterInterface defines the interface that we can find the address for + // bgpRouterInterface defines the interface that we can find the address for bgpRouterInterface = "bgp_routerinterface" - //bgpRouterAS defines the AS for the BGP server + // bgpRouterAS defines the AS for the BGP server bgpRouterAS = "bgp_as" - //bgpPeerAddress defines the address for a BGP peer + // bgpPeerAddress defines the address for a BGP peer bgpPeerAddress = "bgp_peeraddress" - //bgpPeers defines the address for a BGP peer + // bgpPeers defines the address for a BGP peer bgpPeers = "bgp_peers" - //bgpPeerAS defines the AS for a BGP peer + // bgpPeerAS defines the AS for a BGP peer bgpPeerAS = "bgp_peeras" - //bgpPeerAS defines the AS for a BGP peer + // bgpPeerAS defines the AS for a BGP peer bgpPeerPassword = "bgp_peerpass" // nolint - //bgpMultiHop enables mulithop routing + // bgpMultiHop enables mulithop routing bgpMultiHop = "bgp_multihop" - //bgpSourceIF defines the source interface for BGP peering - bgpSourceIF = "bgp_source_if" - //bgpSourceIP defines the source address for BGP peering - bgpSourceIP = "bgp_source_ip" + // bgpSourceIF defines the source interface for BGP peering + bgpSourceIF = "bgp_sourceif" + // bgpSourceIP defines the source address for BGP peering + bgpSourceIP = "bgp_sourceip" + + // vipWireguard - defines if wireguard will be used for vips + vipWireguard = "vip_wireguard" //nolint + + // vipRoutingTable - defines if table mode will be used for vips + vipRoutingTable = "vip_routingtable" //nolint + + // vipRoutingTableID - defines which table mode will be used for vips + vipRoutingTableID = "vip_routingtableid" //nolint - //cpNamespace defines the namespace the control plane pods will run in + // vipRoutingTableType - defines which table type will be used for vip routes + // valid values for this variable can be found in: + // https://pkg.go.dev/golang.org/x/sys/unix#RTN_UNSPEC + // Note that route type have the prefix `RTN_`, and you + // specify the integer value, not the name. 
For example: + // you should say `vip_routingtabletype=2` for RTN_LOCAL + vipRoutingTableType = "vip_routingtabletype" //nolint + + // cpNamespace defines the namespace the control plane pods will run in cpNamespace = "cp_namespace" - //cpEnable starts kube-vip in the hybrid mode + // cpEnable enables the control plane feature cpEnable = "cp_enable" - //cpEnable starts kube-vip in the hybrid mode + // svcEnable enables the Kubernetes service feature svcEnable = "svc_enable" - //lbEnable defines if the load-balancer should be enabled + // svcNamespace defines the namespace the service pods will run in + svcNamespace = "svc_namespace" + + // svcElection enables election per Kubernetes service + svcElection = "svc_election" + + // svcLeaseName Name of the lease that is used for leader election for services (in arp mode) + svcLeaseName = "svc_leasename" + + // lbClassOnly enables load-balancer for class "kube-vip.io/kube-vip-class" only + lbClassOnly = "lb_class_only" + + // lbClassName enables load-balancer for a specific class only + lbClassName = "lb_class_name" + + // lbEnable defines if the load-balancer should be enabled lbEnable = "lb_enable" - //lbPort defines the port of load-balancer + // lbPort defines the port of load-balancer lbPort = "lb_port" - //vipConfigMap defines the configmap that kube-vip will watch for service definitions + // lbForwardingMethod defines the forwarding method of load-balancer + lbForwardingMethod = "lb_fwdmethod" + + // EnableServiceSecurity defines if the load-balancer should only allow traffic to service ports + EnableServiceSecurity = "enable_service_security" + + // EnableNodeLabeling, will enable node labeling as the node becomes leader + EnableNodeLabeling = "enable_node_labeling" + + // prometheusServer defines the address prometheus listens on + prometheusServer = "prometheus_server" + + // vipConfigMap defines the configmap that kube-vip will watch for service definitions // vipConfigMap = "vip_configmap" ) diff --git a/pkg/kubevip/config_generator.go b/pkg/kubevip/config_generator.go index c3ef4e51..b4882819 100644 --- a/pkg/kubevip/config_generator.go +++ b/pkg/kubevip/config_generator.go @@ -2,341 +2,26 @@ package kubevip import ( "fmt" - "os" "strconv" - "github.com/ghodss/yaml" - "github.com/kube-vip/kube-vip/pkg/bgp" - "github.com/kube-vip/kube-vip/pkg/detector" - log "github.com/sirupsen/logrus" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" ) -// ParseEnvironment - will popultate the configuration from environment variables -func ParseEnvironment(c *Config) error { - - // Ensure that logging is set through the environment variables - env := os.Getenv(vipLogLevel) - if env != "" { - logLevel, err := strconv.Atoi(env) - if err != nil { - panic("Unable to parse environment variable [vip_loglevel], should be int") - } - log.SetLevel(log.Level(logLevel)) - } - - // Find interface - env = os.Getenv(vipInterface) - if env != "" { - c.Interface = env - } - - // Find (services) interface - env = os.Getenv(vipServicesInterface) - if env != "" { - c.ServicesInterface = env - } - - // Find provider configuration - env = os.Getenv(providerConfig) - if env != "" { - c.ProviderConfig = env - } - - // Find Kubernetes Leader Election configuration - env = os.Getenv(vipLeaderElection) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableLeaderElection = b - } - - // Attempt to find the Lease configuration from the environment variables - 
env = os.Getenv(vipLeaseDuration) - if env != "" { - i, err := strconv.ParseInt(env, 10, 32) - if err != nil { - return err - } - c.LeaseDuration = int(i) - } - - env = os.Getenv(vipRenewDeadline) - if env != "" { - i, err := strconv.ParseInt(env, 10, 32) - if err != nil { - return err - } - c.RenewDeadline = int(i) - } - - env = os.Getenv(vipRetryPeriod) - if env != "" { - i, err := strconv.ParseInt(env, 10, 32) - if err != nil { - return err - } - c.RetryPeriod = int(i) - } - - // Find vip address - env = os.Getenv(vipAddress) - if env != "" { - // TODO - parse address net.Host() - c.VIP = env - // } else { - // c.VIP = os.Getenv(address) - } - - // Find address - env = os.Getenv(address) - if env != "" { - // TODO - parse address net.Host() - c.Address = env - } - - // Find vip port - env = os.Getenv(port) - if env != "" { - i, err := strconv.ParseInt(env, 10, 32) - if err != nil { - return err - } - c.Port = int(i) - } - - // Find vipDdns - env = os.Getenv(vipDdns) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.DDNS = b - } - - // Find vip address cidr range - env = os.Getenv(cpNamespace) - if env != "" { - c.Namespace = env - } - - // Find the namespace that the control pane should use (for leaderElection lock) - env = os.Getenv(cpNamespace) - if env != "" { - c.Namespace = env - } - - // Find controlplane toggle - env = os.Getenv(cpEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableControlPane = b - } - - // Find Services toggle - env = os.Getenv(svcEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableServices = b - } - - // Find vip address cidr range - env = os.Getenv(vipCidr) - if env != "" { - c.VIPCIDR = env - } - - // Find Single Node - env = os.Getenv(vipSingleNode) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.SingleNode = b - } - - // Find annotation configuration - env = os.Getenv(annotations) - if env != "" { - c.Annotations = env - } - - // Find Start As Leader - // TODO - does this need depricating? 
- // Required when the host sets itself as leader before the state change - env = os.Getenv(vipStartLeader) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.StartAsLeader = b - } - - // Find ARP - env = os.Getenv(vipArp) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableARP = b - } - - // BGP Server options - env = os.Getenv(bgpEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableBGP = b - } - - // BGP Router interface determines an interface that we can use to find an address for - env = os.Getenv(bgpRouterInterface) - if env != "" { - _, address, err := detector.FindIPAddress(env) - if err != nil { - return err - } - c.BGPConfig.RouterID = address - } - - // RouterID - env = os.Getenv(bgpRouterID) - if env != "" { - c.BGPConfig.RouterID = env - } - - // AS - env = os.Getenv(bgpRouterAS) - if env != "" { - u64, err := strconv.ParseUint(env, 10, 32) - if err != nil { - return err - } - c.BGPConfig.AS = uint32(u64) - } - - // Peer AS - env = os.Getenv(bgpPeerAS) - if env != "" { - u64, err := strconv.ParseUint(env, 10, 32) - if err != nil { - return err - } - c.BGPPeerConfig.AS = uint32(u64) - } - - // Peer AS - env = os.Getenv(bgpPeers) - if env != "" { - peers, err := bgp.ParseBGPPeerConfig(env) - if err != nil { - return err - } - c.BGPConfig.Peers = peers - } - - // BGP Peer mutlihop - env = os.Getenv(bgpMultiHop) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.BGPPeerConfig.MultiHop = b - } - - // BGP Peer password - env = os.Getenv(bgpPeerPassword) - if env != "" { - c.BGPPeerConfig.Password = env - } - - // BGP Source Interface - env = os.Getenv(bgpSourceIF) - if env != "" { - c.BGPConfig.SourceIF = env - } - - // BGP Source Address - env = os.Getenv(bgpSourceIP) - if env != "" { - c.BGPConfig.SourceIP = env - } - - // BGP Peer options, add them if relevant - env = os.Getenv(bgpPeerAddress) - if env != "" { - c.BGPPeerConfig.Address = env - // If we've added in a peer configuration, then we should add it to the BGP configuration - c.BGPConfig.Peers = append(c.BGPConfig.Peers, c.BGPPeerConfig) - } - - // Enable the Packet API calls - env = os.Getenv(vipPacket) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableMetal = b - } - - // Find the Packet project name - env = os.Getenv(vipPacketProject) - if env != "" { - // TODO - parse address net.Host() - c.MetalProject = env - } - - // Find the Packet project ID - env = os.Getenv(vipPacketProjectID) - if env != "" { - // TODO - parse address net.Host() - c.MetalProjectID = env - } - - // Enable the load-balancer - env = os.Getenv(lbEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableLoadBalancer = b - } - - // Find loadbalancer port - env = os.Getenv(lbPort) - if env != "" { - i, err := strconv.ParseInt(env, 10, 32) - if err != nil { - return err - } - c.LoadBalancerPort = int(i) - } - return nil -} - // generatePodSpec will take a kube-vip config and generate a Pod spec func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod { command := "manager" + // Determine where the pods should be living (for multi-tenancy) + var namespace string + if c.ServiceNamespace != "" { + namespace = c.ServiceNamespace + } else { + namespace = metav1.NamespaceSystem + } + // build environment variables newEnvironment := []corev1.EnvVar{ { @@ -382,15 +67,26 
@@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod }, } newEnvironment = append(newEnvironment, cidr...) + } + // If a subnet is required for the VIP + if c.VIPSubnet != "" { + // build environment variables + cidr := []corev1.EnvVar{ + { + Name: vipSubnet, + Value: c.VIPSubnet, + }, + } + newEnvironment = append(newEnvironment, cidr...) } // If we're doing the hybrid mode - if c.EnableControlPane { + if c.EnableControlPlane { cp := []corev1.EnvVar{ { Name: cpEnable, - Value: strconv.FormatBool(c.EnableControlPane), + Value: strconv.FormatBool(c.EnableControlPlane), }, { Name: cpNamespace, @@ -406,13 +102,44 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod // If we're doing the hybrid mode if c.EnableServices { - cp := []corev1.EnvVar{ + svc := []corev1.EnvVar{ { Name: svcEnable, Value: strconv.FormatBool(c.EnableServices), }, + { + Name: svcLeaseName, + Value: c.ServicesLeaseName, + }, + } + newEnvironment = append(newEnvironment, svc...) + if c.EnableServicesElection { + svcElection := []corev1.EnvVar{ + { + Name: svcElection, + Value: strconv.FormatBool(c.EnableServicesElection), + }, + } + newEnvironment = append(newEnvironment, svcElection...) + } + if c.LoadBalancerClassOnly { + lbClassOnlyVar := []corev1.EnvVar{ + { + Name: lbClassOnly, + Value: strconv.FormatBool(c.LoadBalancerClassOnly), + }, + } + newEnvironment = append(newEnvironment, lbClassOnlyVar...) + } + if c.EnableServiceSecurity { + EnableServiceSecurityVar := []corev1.EnvVar{ + { + Name: EnableServiceSecurity, + Value: strconv.FormatBool(c.EnableServiceSecurity), + }, + } + newEnvironment = append(newEnvironment, EnableServiceSecurityVar...) } - newEnvironment = append(newEnvironment, cp...) } // If Leader election is enabled then add the configuration to the manifest @@ -423,6 +150,10 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod Name: vipLeaderElection, Value: strconv.FormatBool(c.EnableLeaderElection), }, + { + Name: vipLeaseName, + Value: c.LeaseName, + }, { Name: vipLeaseDuration, Value: fmt.Sprintf("%d", c.LeaseDuration), @@ -454,6 +185,17 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod newEnvironment = append(newEnvironment, leaderElection...) } + // If we're enabling node labeling on leader election + if c.EnableNodeLabeling { + EnableNodeLabeling := []corev1.EnvVar{ + { + Name: EnableNodeLabeling, + Value: strconv.FormatBool(c.EnableNodeLabeling), + }, + } + newEnvironment = append(newEnvironment, EnableNodeLabeling...) + } + // If we're specifying an annotation configuration if c.Annotations != "" { annotations := []corev1.EnvVar{ @@ -477,7 +219,7 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod newEnvironment = append(newEnvironment, provider...) } - // If Packet is enabled then add it to the manifest + // If Equinix Metal is enabled then add it to the manifest if c.EnableMetal { packet := []corev1.EnvVar{ { @@ -500,7 +242,29 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod newEnvironment = append(newEnvironment, packet...) } - // If BGP, but we're not using packet + // Detect and enable wireguard mode + if c.EnableWireguard { + wireguard := []corev1.EnvVar{ + { + Name: vipWireguard, + Value: strconv.FormatBool(c.EnableWireguard), + }, + } + newEnvironment = append(newEnvironment, wireguard...) 
+ } + + // Detect and enable routing table mode + if c.EnableRoutingTable { + routingtable := []corev1.EnvVar{ + { + Name: vipRoutingTable, + Value: strconv.FormatBool(c.EnableRoutingTable), + }, + } + newEnvironment = append(newEnvironment, routingtable...) + } + + // If BGP, but we're not using Equinix Metal if c.EnableBGP { bgp := []corev1.EnvVar{ { @@ -510,7 +274,7 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod } newEnvironment = append(newEnvironment, bgp...) } - // If BGP, but we're not using packet + // If BGP, but we're not using Equinix Metal if c.EnableBGP && !c.EnableMetal { bgpConfig := []corev1.EnvVar{ { @@ -584,6 +348,10 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod Name: lbPort, Value: fmt.Sprintf("%d", c.LoadBalancerPort), }, + { + Name: lbForwardingMethod, + Value: c.LoadBalancerForwardingMethod, + }, } newEnvironment = append(newEnvironment, lb...) @@ -601,6 +369,16 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod }) } + if c.PrometheusHTTPServer != "" { + prometheus := []corev1.EnvVar{ + { + Name: prometheusServer, + Value: c.PrometheusHTTPServer, + }, + } + newEnvironment = append(newEnvironment, prometheus...) + } + newManifest := &corev1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -608,7 +386,7 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod }, ObjectMeta: metav1.ObjectMeta{ Name: "kube-vip", - Namespace: "kube-system", + Namespace: namespace, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ @@ -621,7 +399,6 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod Add: []corev1.Capability{ "NET_ADMIN", "NET_RAW", - "SYS_TIME", }, }, }, @@ -636,7 +413,7 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod } if inCluster { - // If we're running this inCluster then the acccount name will be required + // If we're running this inCluster then the account name will be required newManifest.Spec.ServiceAccountName = "kube-vip" } else { // If this isn't inside a cluster then add the external path mount @@ -684,7 +461,6 @@ func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod } return newManifest - } // GeneratePodManifestFromConfig will take a kube-vip config and generate a manifest @@ -694,8 +470,15 @@ func GeneratePodManifestFromConfig(c *Config, imageVersion string, inCluster boo return string(b) } -// GenerateDeamonsetManifestFromConfig will take a kube-vip config and generate a manifest -func GenerateDeamonsetManifestFromConfig(c *Config, imageVersion string, inCluster, taint bool) string { +// GenerateDaemonsetManifestFromConfig will take a kube-vip config and generate a manifest +func GenerateDaemonsetManifestFromConfig(c *Config, imageVersion string, inCluster, taint bool) string { + // Determine where the pod should be deployed + var namespace string + if c.ServiceNamespace != "" { + namespace = c.ServiceNamespace + } else { + namespace = metav1.NamespaceSystem + } podSpec := generatePodSpec(c, imageVersion, inCluster).Spec newManifest := &appv1.DaemonSet{ @@ -705,18 +488,23 @@ func GenerateDeamonsetManifestFromConfig(c *Config, imageVersion string, inClust }, ObjectMeta: metav1.ObjectMeta{ Name: "kube-vip-ds", - Namespace: "kube-system", + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + "app.kubernetes.io/version": imageVersion, + }, }, Spec: appv1.DaemonSetSpec{ Selector: 
&metav1.LabelSelector{ MatchLabels: map[string]string{ - "name": "kube-vip-ds", + "app.kubernetes.io/name": "kube-vip-ds", }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - "name": "kube-vip-ds", + "app.kubernetes.io/name": "kube-vip-ds", + "app.kubernetes.io/version": imageVersion, }, }, Spec: podSpec, diff --git a/pkg/kubevip/config_manager.go b/pkg/kubevip/config_manager.go index a9e588c0..be605298 100644 --- a/pkg/kubevip/config_manager.go +++ b/pkg/kubevip/config_manager.go @@ -2,182 +2,47 @@ package kubevip import ( "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" ) -var endPointIndex int // Holds the previous endpoint (for determining decisions on next endpoint) - -//ParseBackendConfig - -func ParseBackendConfig(ep string) (*BackEnd, error) { - endpoint := strings.Split(ep, ":") - if len(endpoint) != 2 { - return nil, fmt.Errorf("Ensure a backend is in in the format address:port, e.g. 10.0.0.1:8080") - } - p, err := strconv.Atoi(endpoint[1]) - if err != nil { - return nil, err - } - return &BackEnd{Address: endpoint[0], Port: p}, nil -} - -//ParsePeerConfig - -func ParsePeerConfig(ep string) (*RaftPeer, error) { - endpoint := strings.Split(ep, ":") - if len(endpoint) != 3 { - return nil, fmt.Errorf("Ensure a peer is in in the format id:address:port, e.g. server1:10.0.0.1:8080") - } - p, err := strconv.Atoi(endpoint[2]) - if err != nil { - return nil, err - } - return &RaftPeer{ID: endpoint[0], Address: endpoint[1], Port: p}, nil -} - -//OpenConfig will attempt to read a file and parse it's contents into a configuration -func OpenConfig(path string) (*Config, error) { - if path == "" { - return nil, fmt.Errorf("Path cannot be blank") - } - - log.Infof("Reading configuration from [%s]", path) - - // Check the actual path from the string - if _, err := os.Stat(path); !os.IsNotExist(err) { - // Attempt to read the data - configData, err := ioutil.ReadFile(path) - if err != nil { - return nil, err +func (c *Config) CheckInterface() error { + if c.Interface != "" { + if err := isValidInterface(c.Interface); err != nil { + return fmt.Errorf("%s is not valid interface, reason: %w", c.Interface, err) } - - // If data is read successfully parse the yaml - var c Config - err = yaml.Unmarshal(configData, &c) - if err != nil { - return nil, err - } - return &c, nil - } - return nil, fmt.Errorf("Error reading [%s]", path) -} - -//PrintConfig - will print out an instance of the kubevip config -func (c *Config) PrintConfig() { - b, _ := yaml.Marshal(c) - fmt.Print(string(b)) -} - -//ParseFlags will write the current configuration to a specified [path] -func (c *Config) ParseFlags(localPeer string, remotePeers, backends []string) error { - // Parse localPeer - p, err := ParsePeerConfig(localPeer) - if err != nil { - return err - } - c.LocalPeer = *p - - // Parse remotePeers - //Iterate backends - for i := range remotePeers { - p, err := ParsePeerConfig(remotePeers[i]) - if err != nil { - return err - - } - c.RemotePeers = append(c.RemotePeers, *p) - } - - //Iterate backends - for i := range backends { - b, err := ParseBackendConfig(backends[i]) - if err != nil { - return err + if c.ServicesInterface != "" { + if err := isValidInterface(c.ServicesInterface); err != nil { + return fmt.Errorf("%s is not valid interface, reason: %w", c.ServicesInterface, err) } - c.LoadBalancers[0].Backends = append(c.LoadBalancers[0].Backends, *b) } return nil } 
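
CheckInterface closes the loop on configuration handling: environment variables are parsed into a Config by ParseEnvironment, the configured interface is validated against its netlink state, and the manifest generators render the result. The end-to-end sketch below is hypothetical: the environment values and the "v0.6.0" image tag are placeholders, and it assumes it runs on a host where eth0 exists and is up.

package main

import (
	"fmt"
	"log"
	"os"

	"github.com/kube-vip/kube-vip/pkg/kubevip"
)

func main() {
	// Illustrative environment; the keys are the constants defined in
	// config_envvar.go (vip_interface, vip_address, vip_arp, ...).
	os.Setenv("vip_interface", "eth0")
	os.Setenv("vip_address", "192.168.0.100")
	os.Setenv("vip_arp", "true")
	os.Setenv("vip_leaderelection", "true")
	os.Setenv("cp_enable", "true")

	cfg := &kubevip.Config{}
	if err := kubevip.ParseEnvironment(cfg); err != nil {
		log.Fatalf("parsing environment: %v", err)
	}

	// CheckInterface validates the configured interface(s) via netlink, so this
	// only succeeds on a host where eth0 is present and up.
	if err := cfg.CheckInterface(); err != nil {
		log.Fatalf("interface validation: %v", err)
	}

	// Render a DaemonSet manifest for an in-cluster deployment that tolerates
	// control-plane taints; "v0.6.0" is a placeholder image tag.
	fmt.Println(kubevip.GenerateDaemonsetManifestFromConfig(cfg, "v0.6.0", true, true))
}
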
-//SampleConfig will create an example configuration and write it to the specified [path] -func SampleConfig() { - - // Generate Sample configuration - c := &Config{ - // Generate sample peers - RemotePeers: []RaftPeer{ - { - ID: "server2", - Address: "192.168.0.2", - Port: 10000, - }, - { - ID: "server3", - Address: "192.168.0.3", - Port: 10000, - }, - }, - LocalPeer: RaftPeer{ - ID: "server1", - Address: "192.168.0.1", - Port: 10000, - }, - // Virtual IP address - VIP: "192.168.0.100", - // Interface to bind to - Interface: "eth0", - // Load Balancer Configuration - LoadBalancers: []LoadBalancer{ - { - Name: "Kubernetes Control Plane", - Type: "http", - Port: 6443, - BindToVip: true, - Backends: []BackEnd{ - { - Address: "192.168.0.100", - Port: 6443, - }, - { - Address: "192.168.0.101", - Port: 6443, - }, - { - Address: "192.168.0.102", - Port: 6443, - }, - }, - }, - }, - } - b, _ := yaml.Marshal(c) - - fmt.Print(string(b)) -} - -//WriteConfig will write the current configuration to a specified [path] -func (c *Config) WriteConfig(path string) error { - f, err := os.Create(path) +func isValidInterface(iface string) error { + l, err := netlink.LinkByName(iface) if err != nil { - return err + return fmt.Errorf("get %s failed, error: %w", iface, err) } - defer f.Close() + attrs := l.Attrs() - b, err := yaml.Marshal(c) - if err != nil { - return err - } - bytesWritten, err := f.Write(b) - if err != nil { - return err + // Some interfaces (included but not limited to lo and point-to-point + // interfaces) do not provide a operational status but are safe to use. + // From kernek.org: "Interface is in unknown state, neither driver nor + // userspace has set operational state. Interface must be considered for user + // data as setting operational state has not been implemented in every driver." + if attrs.OperState == netlink.OperUnknown { + log.Warningf( + "the status of the interface %s is unknown. 
Ensure your interface is ready to accept traffic, if so you can safely ignore this message", + iface, + ) + } else if attrs.OperState != netlink.OperUp { + return fmt.Errorf("%s is not up", iface) } - log.Debugf("wrote %d bytes\n", bytesWritten) + return nil } diff --git a/pkg/kubevip/config_types.go b/pkg/kubevip/config_types.go index 0cc63b0a..7704e136 100644 --- a/pkg/kubevip/config_types.go +++ b/pkg/kubevip/config_types.go @@ -1,13 +1,13 @@ package kubevip import ( - "net/url" - "github.com/kube-vip/kube-vip/pkg/bgp" ) // Config defines all of the settings for the Kube-Vip Pod type Config struct { + // Logging, settings + Logging int `yaml:"logging"` // EnableARP, will use ARP to advertise the VIP address EnableARP bool `yaml:"enableARP"` @@ -15,23 +15,48 @@ type Config struct { // EnableBGP, will use BGP to advertise the VIP address EnableBGP bool `yaml:"enableBGP"` - // EnableControlPane, will enable the control plane functionality (used for hybrid behaviour) - EnableControlPane bool `yaml:"enableControlPane"` + // EnableWireguard, will use wireguard to advertise the VIP address + EnableWireguard bool `yaml:"enableWireguard"` + + // EnableRoutingTable, will use the routing table to advertise the VIP address + EnableRoutingTable bool `yaml:"enableRoutingTable"` + + // EnableControlPlane, will enable the control plane functionality (used for hybrid behaviour) + EnableControlPlane bool `yaml:"enableControlPlane"` - // EnableControlPane, will enable the control plane functionality (used for hybrid behaviour) + // EnableServices, will enable the services functionality (used for hybrid behaviour) EnableServices bool `yaml:"enableServices"` + // EnableServicesElection, will enable leaderElection per service + EnableServicesElection bool `yaml:"enableServicesElection"` + + // EnableNodeLabeling, will enable node labeling as it becomes leader + EnableNodeLabeling bool `yaml:"enableNodeLabeling"` + + // LoadBalancerClassOnly, will enable load balancing only for services with LoadBalancerClass set to "kube-vip.io/kube-vip-class" + LoadBalancerClassOnly bool `yaml:"lbClassOnly"` + + // LoadBalancerClassName, will limit the load balancing services to services with LoadBalancerClass set to this value + LoadBalancerClassName string `yaml:"lbClassName"` + + // EnableServiceSecurity, will enable the use of iptables to secure services + EnableServiceSecurity bool `yaml:"EnableServiceSecurity"` + + // ArpBroadcastRate, defines how often kube-vip will update the network about updates to the network + ArpBroadcastRate int64 `yaml:"arpBroadcastRate"` + // Annotations will define if we're going to wait and lookup configuration from Kubernetes node annotations Annotations string - // LeaderElection defines the settings around Kubernetes LeaderElection - LeaderElection + // LeaderElectionType defines the backend to run the leader election: kubernetes or etcd. Defaults to kubernetes. + // Etcd doesn't support load balancer mode (EnableLoadBalancer=true) or any other feature that depends on the kube-api server. + LeaderElectionType string `yaml:"leaderElectionType"` - // LocalPeer is the configuration of this host - LocalPeer RaftPeer `yaml:"localPeer"` + // KubernetesLeaderElection defines the settings around Kubernetes KubernetesLeaderElection + KubernetesLeaderElection - // Peers are all of the peers within the RAFT cluster - RemotePeers []RaftPeer `yaml:"remotePeers"` + // Etcd defines all the settings for the etcd client. 
+	Etcd Etcd
 
 	// AddPeersAsBackends, this will automatically add RAFT peers as backends to a loadbalancer
 	AddPeersAsBackends bool `yaml:"addPeersAsBackends"`
@@ -39,6 +64,9 @@ type Config struct {
 	// VIP is the Virtual IP address exposed for the cluster (TODO: deprecate)
 	VIP string `yaml:"vip"`
 
+	// VipSubnet is the Subnet that is applied to the VIP
+	VIPSubnet string `yaml:"vipSubnet"`
+
 	// VIPCIDR is cidr range for the VIP (primarily needed for BGP)
 	VIPCIDR string `yaml:"vipCidr"`
 
@@ -51,6 +79,9 @@ type Config struct {
 	// Namespace will define which namespace the control plane pods will run in
 	Namespace string `yaml:"namespace"`
 
+	// ServiceNamespace will define which namespace the services will be watched in
+	ServiceNamespace string `yaml:"serviceNamespace"`
+
 	// use DDNS to allocate IP when Address is set to a DNS Name
 	DDNS bool `yaml:"ddns"`
 
@@ -72,12 +103,21 @@ type Config struct {
 	// Listen port for the IPVS Service
 	LoadBalancerPort int `yaml:"lbPort"`
 
+	// Forwarding method for the IPVS Service
+	LoadBalancerForwardingMethod string `yaml:"lbForwardingMethod"`
+
+	// Routing Table ID for when using routing table mode
+	RoutingTableID int `yaml:"routingTableID"`
+
+	// Routing Table Type, what sort of route should be added to the routing table
+	RoutingTableType int `yaml:"routingTableType"`
+
 	// BGP Configuration
 	BGPConfig     bgp.Config
 	BGPPeerConfig bgp.Peer
 	BGPPeers      []string
 
-	// EnablePacket, will use the metal API to update the EIP <-> VIP (if BGP is enabled then BGP will be used)
+	// EnableMetal, will use the metal API to update the EIP <-> VIP (if BGP is enabled then BGP will be used)
 	EnableMetal bool `yaml:"enableMetal"`
 
 	// MetalAPIKey, is the API token used to authenticate to the API
@@ -97,14 +137,30 @@ type Config struct {
 
 	// The hostport used to expose Prometheus metrics over an HTTP server
 	PrometheusHTTPServer string `yaml:"prometheusHTTPServer,omitempty"`
-}
 
-// LeaderElection defines all of the settings for Kubernetes LeaderElection
-type LeaderElection struct {
+	// Egress configuration
+
+	// EgressPodCidr, this contains the pod cidr range that egress rules will ignore
+	EgressPodCidr string
+
+	// EgressServiceCidr, this contains the service cidr range that egress rules will ignore
+	EgressServiceCidr string
+	// EgressWithNftables, this will use the nftables backend of iptables instead of legacy iptables
+	EgressWithNftables bool
+
+	// ServicesLeaseName, this will set the lease name for services leader in arp mode
+	ServicesLeaseName string `yaml:"servicesLeaseName"`
+}
+
+// KubernetesLeaderElection defines all of the settings for Kubernetes leader election
+type KubernetesLeaderElection struct {
 	// EnableLeaderElection will use the Kubernetes leader election algorithm
 	EnableLeaderElection bool `yaml:"enableLeaderElection"`
 
+	// LeaseName - name of the lease for leader election
+	LeaseName string `yaml:"leaseName"`
+
 	// Lease Duration - length of time a lease can be held for
 	LeaseDuration int
@@ -113,18 +169,17 @@ type LeaderElection struct {
 	// RetryPerion - Number of times the host will retry to hold a lease
 	RetryPeriod int
-}
 
-// RaftPeer details the configuration of all cluster peers
-type RaftPeer struct {
-	// ID is the unique identifier a peer instance
-	ID string `yaml:"id"`
-
-	// IP Address of a peer instance
-	Address string `yaml:"address"`
+	// LeaseAnnotations - annotations which will be given to the lease object
+	LeaseAnnotations map[string]string
+}
 
-	// Listening port of this peer instance
-	Port int `yaml:"port"`
+// Etcd defines all the settings for the etcd client. 
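+// As an illustration only (the file paths and endpoint below are placeholders,
+// not defaults shipped by kube-vip), a populated etcd client configuration
+// might look like:
+//
+//	Etcd{
+//		CAFile:         "/etc/kubernetes/pki/etcd/ca.crt",
+//		ClientCertFile: "/etc/kubernetes/pki/etcd/client.crt",
+//		ClientKeyFile:  "/etc/kubernetes/pki/etcd/client.key",
+//		Endpoints:      []string{"https://127.0.0.1:2379"},
+//	}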
+type Etcd struct { + CAFile string + ClientCertFile string + ClientKeyFile string + Endpoints []string } // LoadBalancer contains the configuration of a load balancing instance @@ -141,24 +196,6 @@ type LoadBalancer struct { // BindToVip will bind the load balancer port to the VIP itself BindToVip bool `yaml:"bindToVip"` - //BackendPort, is a port that all backends are listening on (To be used to simplify building a list of backends) - BackendPort int `yaml:"backendPort"` - - //Backends, is an array of backend servers - Backends []BackEnd `yaml:"backends"` -} - -// BackEnd is a server we will load balance over -type BackEnd struct { - // Backend Port to Load Balance to - Port int `yaml:"port"` - - // Address of a server/service - Address string `yaml:"address"` - - // URL is a raw URL to a backend service - RawURL string `yaml:"url,omitempty"` - - // ParsedURL - A validated URL to a backend - ParsedURL *url.URL `yaml:"parsedURL,omitempty"` + // Forwarding method of LoadBalancer, either Local, Tunnel, DirectRoute or Bypass + ForwardingMethod string `yaml:"forwardingMethod"` } diff --git a/pkg/loadbalancer/ipvs.go b/pkg/loadbalancer/ipvs.go index fb24fc53..b0b15cb6 100644 --- a/pkg/loadbalancer/ipvs.go +++ b/pkg/loadbalancer/ipvs.go @@ -35,44 +35,56 @@ type IPVSLoadBalancer struct { client ipvs.Client loadBalancerService ipvs.Service Port int + forwardingMethod ipvs.ForwardType } -func NewIPVSLB(address string, port int) (*IPVSLoadBalancer, error) { - +func NewIPVSLB(address string, port int, forwardingMethod string) (*IPVSLoadBalancer, error) { // Create IPVS client c, err := ipvs.New() if err != nil { - return nil, fmt.Errorf("error creating IPVS client: %v", err) + log.Errorf("ensure IPVS kernel modules are loaded") + log.Fatalf("Error starting IPVS [%v]", err) + } + i, err := c.Info() + if err != nil { + log.Errorf("ensure IPVS kernel modules are loaded") + log.Fatalf("Error getting IPVS version [%v]", err) } + log.Infof("IPVS Loadbalancer enabled for %d.%d.%d", i.Version[0], i.Version[1], i.Version[2]) + + ip, family := ipAndFamily(address) // Generate out API Server LoadBalancer instance svc := ipvs.Service{ - Family: ipvs.INET, + Family: family, Protocol: ipvs.TCP, Port: uint16(port), - Address: ipvs.NewIP(net.ParseIP(address)), + Address: ip, Scheduler: ROUNDROBIN, } - err = c.CreateService(svc) - // If we've an error it could be that the IPVS lb instance has been left from a previous leadership - if err != nil && strings.Contains(err.Error(), "file exists") { - log.Warnf("load balancer for API server already exists, attempting to remove and re-create") - err = c.RemoveService(svc) - if err != nil { - return nil, fmt.Errorf("error re-creating IPVS service: %v", err) - } - err = c.CreateService(svc) - if err != nil { - return nil, fmt.Errorf("error re-creating IPVS service: %v", err) - } - } else if err != nil { - return nil, fmt.Errorf("error creating IPVS service: %v", err) + + var m ipvs.ForwardType + switch strings.ToLower(forwardingMethod) { + case "masquerade": + m = ipvs.Masquarade + case "local": + m = ipvs.Local + case "tunnel": + m = ipvs.Tunnel + case "directroute": + m = ipvs.DirectRoute + case "bypass": + m = ipvs.Bypass + default: + m = ipvs.Local + log.Warnf("unknown forwarding method. 
Defaulting to Local") } lb := &IPVSLoadBalancer{ Port: port, client: c, loadBalancerService: svc, + forwardingMethod: m, } // Return our created load-balancer return lb, nil @@ -84,32 +96,78 @@ func (lb *IPVSLoadBalancer) RemoveIPVSLB() error { return fmt.Errorf("error removing existing IPVS service: %v", err) } return nil - } -func (lb *IPVSLoadBalancer) AddBackend(address string) error { +func (lb *IPVSLoadBalancer) AddBackend(address string, port int) error { + // Check if this is the first backend + backends, err := lb.client.Destinations(lb.loadBalancerService) + if err != nil && strings.Contains(err.Error(), "file does not exist") { + log.Errorf("Error querying backends %s", err) + } + // If this is our first backend, then we can create the load-balancer service and add a backend + if len(backends) == 0 { + err = lb.client.CreateService(lb.loadBalancerService) + // If we've an error it could be that the IPVS lb instance has been left from a previous leadership + if err != nil && strings.Contains(err.Error(), "file exists") { + log.Warnf("load balancer for API server already exists, attempting to remove and re-create") + err = lb.client.RemoveService(lb.loadBalancerService) + + if err != nil { + return fmt.Errorf("error re-creating IPVS service: %v", err) + } + err = lb.client.CreateService(lb.loadBalancerService) + if err != nil { + return fmt.Errorf("error re-creating IPVS service: %v", err) + } + } else if err != nil { + // Fatal error at this point as IPVS is probably not working + log.Errorf("Unable to create an IPVS service, ensure IPVS kernel modules are loaded") + log.Fatalf("IPVS service error: %v", err) + } + log.Infof("Created Load-Balancer services on [%s:%d]", lb.addrString(), lb.Port) + } + + ip, family := ipAndFamily(address) + + // Ignore backends that use a different address family. + // Looks like different families could be supported in tunnel mode... 
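+	// For example, if the load balancer was created for an IPv4 VIP, a backend
+	// address such as "2001:db8::10" resolves to ipvs.INET6, does not match the
+	// service family and is skipped without error (the address is illustrative).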
+ if family != lb.loadBalancerService.Family { + return nil + } + dst := ipvs.Destination{ - Address: ipvs.NewIP(net.ParseIP(address)), - Port: 6443, - Family: ipvs.INET, + Address: ip, + Port: uint16(port), + Family: family, Weight: 1, - FwdMethod: ipvs.Local, + FwdMethod: lb.forwardingMethod, } - err := lb.client.CreateDestination(lb.loadBalancerService, dst) + err = lb.client.CreateDestination(lb.loadBalancerService, dst) // Swallow error of existing back end, the node watcher may attempt to apply // the same back end multiple times - if err != nil && !strings.Contains(err.Error(), "file exists") { - return fmt.Errorf("error creating backend: %v", err) + if err != nil { + if !strings.Contains(err.Error(), "file exists") { + return fmt.Errorf("error creating backend: %v", err) + } + // file exists is fine, we will just return at this point + return nil } + log.Infof("Added backend for [%s:%d] on [%s:%d]", lb.addrString(), lb.Port, address, port) + return nil } -func (lb *IPVSLoadBalancer) RemoveBackend(address string) error { +func (lb *IPVSLoadBalancer) RemoveBackend(address string, port int) error { + ip, family := ipAndFamily(address) + if family != lb.loadBalancerService.Family { + return nil + } + dst := ipvs.Destination{ - Address: ipvs.NewIP(net.ParseIP(address)), - Port: 6443, - Family: ipvs.INET, + Address: ip, + Port: uint16(port), + Family: family, Weight: 1, } err := lb.client.RemoveDestination(lb.loadBalancerService, dst) @@ -118,3 +176,15 @@ func (lb *IPVSLoadBalancer) RemoveBackend(address string) error { } return nil } + +func (lb *IPVSLoadBalancer) addrString() string { + return lb.loadBalancerService.Address.Net(lb.loadBalancerService.Family).String() +} + +func ipAndFamily(address string) (ipvs.IP, ipvs.AddressFamily) { + ipAddr := net.ParseIP(address) + if ipAddr.To4() == nil { + return ipvs.NewIP(ipAddr), ipvs.INET6 + } + return ipvs.NewIP(ipAddr), ipvs.INET +} diff --git a/pkg/manager/cluster.go b/pkg/manager/cluster.go new file mode 100644 index 00000000..da2222de --- /dev/null +++ b/pkg/manager/cluster.go @@ -0,0 +1,29 @@ +package manager + +import ( + "github.com/pkg/errors" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/etcd" +) + +func initClusterManager(sm *Manager) (*cluster.Manager, error) { + m := &cluster.Manager{ + SignalChan: sm.signalChan, + } + + switch sm.config.LeaderElectionType { + case "kubernetes", "": + m.KubernetesClient = sm.clientSet + case "etcd": + client, err := etcd.NewClient(sm.config) + if err != nil { + return nil, err + } + m.EtcdClient = client + default: + return nil, errors.Errorf("invalid LeaderElectionMode %s not supported", sm.config.LeaderElectionType) + } + + return m, nil +} diff --git a/pkg/manager/instance.go b/pkg/manager/instance.go new file mode 100644 index 00000000..004f6992 --- /dev/null +++ b/pkg/manager/instance.go @@ -0,0 +1,202 @@ +package manager + +import ( + "fmt" + "net" + + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + v1 "k8s.io/api/core/v1" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/vip" +) + +// Instance defines an instance of everything needed to manage a vip +type Instance struct { + // Virtual IP / Load Balancer configuration + vipConfig *kubevip.Config + + // cluster instance + cluster *cluster.Cluster + + // Service uses DHCP + isDHCP bool + dhcpInterface string + dhcpInterfaceHwaddr string + dhcpInterfaceIP string + dhcpHostname string + dhcpClient 
*vip.DHCPClient + + // Kubernetes service mapping + Vip string + Port int32 + UID string + Type string + + serviceSnapshot *v1.Service +} + +func NewInstance(svc *v1.Service, config *kubevip.Config) (*Instance, error) { + instanceAddress := fetchServiceAddress(svc) + instanceUID := string(svc.UID) + + // Detect if we're using a specific interface for services + var serviceInterface string + if config.ServicesInterface != "" { + serviceInterface = config.ServicesInterface + } else { + serviceInterface = config.Interface + } + + // Generate new Virtual IP configuration + newVip := &kubevip.Config{ + VIP: instanceAddress, // TODO support more than one vip? + Interface: serviceInterface, + SingleNode: true, + EnableARP: config.EnableARP, + EnableBGP: config.EnableBGP, + VIPCIDR: config.VIPCIDR, + VIPSubnet: config.VIPSubnet, + EnableRoutingTable: config.EnableRoutingTable, + RoutingTableID: config.RoutingTableID, + RoutingTableType: config.RoutingTableType, + ArpBroadcastRate: config.ArpBroadcastRate, + EnableServiceSecurity: config.EnableServiceSecurity, + } + + // Create new service + instance := &Instance{ + UID: instanceUID, + Vip: instanceAddress, + serviceSnapshot: svc, + } + if len(svc.Spec.Ports) > 0 { + instance.Type = string(svc.Spec.Ports[0].Protocol) + instance.Port = svc.Spec.Ports[0].Port + } + + if svc.Annotations != nil { + instance.dhcpInterfaceHwaddr = svc.Annotations[hwAddrKey] + instance.dhcpInterfaceIP = svc.Annotations[requestedIP] + instance.dhcpHostname = svc.Annotations[loadbalancerHostname] + } + + // Generate Load Balancer config + newLB := kubevip.LoadBalancer{ + Name: fmt.Sprintf("%s-load-balancer", svc.Name), + Port: int(instance.Port), + Type: instance.Type, + BindToVip: true, + } + // Add Load Balancer Configuration + newVip.LoadBalancers = append(newVip.LoadBalancers, newLB) + // Create Add configuration to the new service + instance.vipConfig = newVip + + // If this was purposely created with the address 0.0.0.0, + // we will create a macvlan on the main interface and a DHCP client + if instanceAddress == "0.0.0.0" { + err := instance.startDHCP() + if err != nil { + return nil, err + } + select { + case err := <-instance.dhcpClient.ErrorChannel(): + return nil, fmt.Errorf("error starting DHCP for %s/%s: error: %s", + instance.serviceSnapshot.Namespace, instance.serviceSnapshot.Name, err) + case ip := <-instance.dhcpClient.IPChannel(): + instance.vipConfig.VIP = ip + instance.dhcpInterfaceIP = ip + } + } + + c, err := cluster.InitCluster(instance.vipConfig, false) + if err != nil { + log.Errorf("Failed to add Service %s/%s", svc.Namespace, svc.Name) + return nil, err + } + c.Network.SetServicePorts(svc) + instance.cluster = c + + return instance, nil +} + +func (i *Instance) startDHCP() error { + parent, err := netlink.LinkByName(i.vipConfig.Interface) + if err != nil { + return fmt.Errorf("error finding VIP Interface, for building DHCP Link : %v", err) + } + + // Generate name from UID + interfaceName := fmt.Sprintf("vip-%s", i.UID[0:8]) + + // Check if the interface doesn't exist first + iface, err := net.InterfaceByName(interfaceName) + if err != nil { + log.Infof("Creating new macvlan interface for DHCP [%s]", interfaceName) + + hwaddr, err := net.ParseMAC(i.dhcpInterfaceHwaddr) + if i.dhcpInterfaceHwaddr != "" && err != nil { + return err + } else if hwaddr == nil { + hwaddr, err = net.ParseMAC(vip.GenerateMac()) + if err != nil { + return err + } + } + + log.Infof("New interface [%s] mac is %s", interfaceName, hwaddr) + mac := &netlink.Macvlan{ + 
LinkAttrs: netlink.LinkAttrs{ + Name: interfaceName, + ParentIndex: parent.Attrs().Index, + HardwareAddr: hwaddr, + }, + Mode: netlink.MACVLAN_MODE_DEFAULT, + } + + err = netlink.LinkAdd(mac) + if err != nil { + return fmt.Errorf("could not add %s: %v", interfaceName, err) + } + + err = netlink.LinkSetUp(mac) + if err != nil { + return fmt.Errorf("could not bring up interface [%s] : %v", interfaceName, err) + } + + iface, err = net.InterfaceByName(interfaceName) + if err != nil { + return fmt.Errorf("error finding new DHCP interface by name [%v]", err) + } + } else { + log.Infof("Using existing macvlan interface for DHCP [%s]", interfaceName) + } + + var initRebootFlag bool + if i.dhcpInterfaceIP != "" { + initRebootFlag = true + } + + client := vip.NewDHCPClient(iface, initRebootFlag, i.dhcpInterfaceIP) + + // Add hostname to dhcp client if annotated + if i.dhcpHostname != "" { + log.Infof("Hostname specified for dhcp lease: [%s] - [%s]", interfaceName, i.dhcpHostname) + client.WithHostName(i.dhcpHostname) + } + + go client.Start() + + // Set that DHCP is enabled + i.isDHCP = true + // Set the name of the interface so that it can be removed on Service deletion + i.dhcpInterface = interfaceName + i.dhcpInterfaceHwaddr = iface.HardwareAddr.String() + // Add the client so that we can call it to stop function + i.dhcpClient = client + + return nil +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 8980f68c..fdec7d73 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -2,20 +2,18 @@ package manager import ( "fmt" - "io/ioutil" "os" "os/signal" "path/filepath" "strings" + "sync" "syscall" "github.com/kube-vip/kube-vip/pkg/k8s" "github.com/kamhlos/upnp" "github.com/kube-vip/kube-vip/pkg/bgp" - "github.com/kube-vip/kube-vip/pkg/cluster" "github.com/kube-vip/kube-vip/pkg/kubevip" - "github.com/kube-vip/kube-vip/pkg/vip" "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "k8s.io/client-go/kubernetes" @@ -33,7 +31,7 @@ type Manager struct { // service bool // Keeps track of all running instances - serviceInstances []Instance + serviceInstances []*Instance // Additional functionality upnp *upnp.Upnp @@ -41,36 +39,22 @@ type Manager struct { //BGP Manager, this is a singleton that manages all BGP advertisements bgpServer *bgp.Server - // This channel is used to signal a shutdown + // This channel is used to catch an OS signal and trigger a shutdown signalChan chan os.Signal + // This channel is used to signal a shutdown + shutdownChan chan struct{} + // This is a prometheus counter used to count the number of events received // from the service watcher countServiceWatchEvent *prometheus.CounterVec -} - -// Instance defines an instance of everything needed to manage a vip -type Instance struct { - // Virtual IP / Load Balancer configuration - vipConfig kubevip.Config - - // cluster instance - cluster cluster.Cluster - - // Service uses DHCP - isDHCP bool - dhcpInterface string - dhcpInterfaceHwaddr string - dhcpInterfaceIP string - dhcpClient *vip.DHCPClient - // Kubernetes service mapping - Vip string - Port int32 - UID string - Type string + // This is a prometheus gauge indicating the state of the sessions. 
+ // 1 means "ESTABLISHED", 0 means "NOT ESTABLISHED" + bgpSessionInfoGauge *prometheus.GaugeVec - ServiceName string + // This mutex is to protect calls from various goroutines + mutex sync.Mutex } // New will create a new managing object @@ -83,9 +67,11 @@ func New(configMap string, config *kubevip.Config) (*Manager, error) { homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") switch { + case config.LeaderElectionType == "etcd": + // Do nothing, we don't construct a k8s client for etcd leader election case fileExists(adminConfigPath): - if config.EnableControlPane { - // If this is a control pane host it will likely have started as a static pod or won't have the + if config.EnableControlPlane { + // If this is a control plane host it will likely have started as a static pod or won't have the // VIP up before trying to connect to the API server, we set the API endpoint to this machine to // ensure connectivity. clientset, err = k8s.NewClientset(adminConfigPath, false, fmt.Sprintf("kubernetes:%v", config.Port)) @@ -120,6 +106,12 @@ func New(configMap string, config *kubevip.Config) (*Manager, error) { Name: "all_services_events", Help: "Count all events fired by the service watcher categorised by event type", }, []string{"type"}), + bgpSessionInfoGauge: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "kube_vip", + Subsystem: "manager", + Name: "bgp_session_info", + Help: "Display state of session by setting metric for label value with current state to 1", + }, []string{"state", "peer"}), }, nil } @@ -136,9 +128,8 @@ func (sm *Manager) Start() error { // Add Notification for SIGTERM (sent from Kubernetes) signal.Notify(sm.signalChan, syscall.SIGTERM) - // Add Notification for SIGKILL (sent from Kubernetes) - //nolint - signal.Notify(sm.signalChan, syscall.SIGKILL) + // All watchers and other goroutines should have an additional goroutine that blocks on this, to shut things down + sm.shutdownChan = make(chan struct{}) // If BGP is enabled then we start a server instance that will broadcast VIPs if sm.config.EnableBGP { @@ -150,23 +141,31 @@ func (sm *Manager) Start() error { } log.Infoln("Starting Kube-vip Manager with the BGP engine") - log.Infof("Namespace [%s], Hybrid mode [%t]", sm.config.Namespace, sm.config.EnableControlPane && sm.config.EnableServices) return sm.startBGP() } // If ARP is enabled then we start a LeaderElection that will use ARP to advertise VIPs if sm.config.EnableARP { log.Infoln("Starting Kube-vip Manager with the ARP engine") - log.Infof("Namespace [%s], Hybrid mode [%t]", sm.config.Namespace, sm.config.EnableControlPane && sm.config.EnableServices) return sm.startARP() } - log.Infoln("Prematurely exiting Load-balancer as neither Layer2 or Layer3 is enabled") + if sm.config.EnableWireguard { + log.Infoln("Starting Kube-vip Manager with the Wireguard engine") + return sm.startWireguard() + } + + if sm.config.EnableRoutingTable { + log.Infoln("Starting Kube-vip Manager with the Routing Table engine") + return sm.startTableMode() + } + + log.Errorln("prematurely exiting Load-balancer as no modes [ARP/BGP/Wireguard] are enabled") return nil } func returnNameSpace() (string, error) { - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { if ns := strings.TrimSpace(string(data)); len(ns) > 0 { return ns, nil } diff --git a/pkg/manager/manager_arp.go b/pkg/manager/manager_arp.go index 
b5558837..1860589c 100644 --- a/pkg/manager/manager_arp.go +++ b/pkg/manager/manager_arp.go @@ -8,11 +8,13 @@ import ( "time" "github.com/kamhlos/upnp" - "github.com/kube-vip/kube-vip/pkg/cluster" log "github.com/sirupsen/logrus" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/vip" ) // Start will begin the Manager, which will start services and watch the configmap @@ -29,29 +31,31 @@ func (sm *Manager) startARP() error { // Shutdown function that will wait on this signal, unless we call it ourselves go func() { <-sm.signalChan - log.Info("Received termination, signaling shutdown") - if sm.config.EnableControlPane { + log.Info("Received kube-vip termination, signaling shutdown") + if sm.config.EnableControlPlane { cpCluster.Stop() } + // Close all go routines + close(sm.shutdownChan) // Cancel the context, which will in turn cancel the leadership cancel() }() - if sm.config.EnableControlPane { + if sm.config.EnableControlPlane { cpCluster, err = cluster.InitCluster(sm.config, false) if err != nil { return err } - clusterManager := &cluster.Manager{ - KubernetesClient: sm.clientSet, - SignalChan: sm.signalChan, + clusterManager, err := initClusterManager(sm) + if err != nil { + return err } go func() { err := cpCluster.StartCluster(sm.config, clusterManager, nil) if err != nil { - log.Errorf("Control Pane Error [%v]", err) + log.Errorf("Control Plane Error [%v]", err) // Trigger the shutdown of this manager instance sm.signalChan <- syscall.SIGINT @@ -68,9 +72,11 @@ func (sm *Manager) startARP() error { ns = sm.config.Namespace } else { + ns, err = returnNameSpace() if err != nil { - return err + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace } } @@ -94,59 +100,87 @@ func (sm *Manager) startARP() error { } } - log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) - // we use the Lease lock type since edits to Leases are less common - // and fewer objects in the cluster watch "all Leases". - lock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Name: plunderLock, - Namespace: ns, - }, - Client: sm.clientSet.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - }, + // This will tidy any dangling kube-vip iptables rules + if os.Getenv("EGRESS_CLEAN") != "" { + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, sm.config.ServiceNamespace) + if err != nil { + log.Warnf("(egress) Unable to clean any dangling egress rules [%v]", err) + log.Warn("(egress) Can be ignored in non iptables release of kube-vip") + } else { + log.Info("(egress) Cleaning any dangling kube-vip egress rules") + cleanErr := i.CleanIPtables() + if cleanErr != nil { + log.Errorf("Error cleaning rules [%v]", cleanErr) + } + } } - // start the leader election code loop - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - // IMPORTANT: you MUST ensure that any code you have that - // is protected by the lease must terminate **before** - // you call cancel. Otherwise, you could have a background - // loop still running and another process could - // get elected before your background loop finished, violating - // the stated goal of the lease. 
- ReleaseOnCancel: true, - LeaseDuration: 10 * time.Second, - RenewDeadline: 5 * time.Second, - RetryPeriod: 1 * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(ctx context.Context) { - err = sm.servicesWatcher(ctx) - if err != nil { - log.Error(err) - } - }, - OnStoppedLeading: func() { - // we can do cleanup here - log.Infof("leader lost: %s", id) - for x := range sm.serviceInstances { - sm.serviceInstances[x].cluster.Stop() - } - - log.Fatal("lost leadership, restarting kube-vip") + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, sm.config.ServicesLeaseName, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: sm.config.ServicesLeaseName, + Namespace: ns, }, - OnNewLeader: func(identity string) { - // we're notified when new leader elected - if identity == id { - // I just got the lock - return - } - log.Infof("new leader elected: %s", identity) + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, }, - }, - }) + } + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. 
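+			// The timings below come from the kube-vip configuration and are
+			// interpreted as seconds (LeaseDuration, RenewDeadline and RetryPeriod).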
+ ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for x := range sm.serviceInstances { + sm.serviceInstances[x].cluster.Stop() + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if sm.config.EnableNodeLabeling { + applyNodeLabel(sm.clientSet, sm.config.Address, id, identity) + } + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } return nil } diff --git a/pkg/manager/manager_bgp.go b/pkg/manager/manager_bgp.go index 2c2522a5..edc2d8db 100644 --- a/pkg/manager/manager_bgp.go +++ b/pkg/manager/manager_bgp.go @@ -2,27 +2,30 @@ package manager import ( "context" + "fmt" "os" "syscall" "github.com/kube-vip/kube-vip/pkg/bgp" "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/packet" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + api "github.com/osrg/gobgp/v3/api" "github.com/packethost/packngo" + "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" ) // Start will begin the Manager, which will start services and watch the configmap func (sm *Manager) startBGP() error { var cpCluster *cluster.Cluster - //var ns string + // var ns string var err error - // If Packet is enabled then we can begin our preparation work + // If Equinix Metal is enabled then we can begin our preparation work var packetClient *packngo.Client if sm.config.EnableMetal { if sm.config.ProviderConfig != "" { - key, project, err := packet.GetPacketConfig(sm.config.ProviderConfig) + key, project, err := equinixmetal.GetPacketConfig(sm.config.ProviderConfig) if err != nil { log.Error(err) } else { @@ -37,10 +40,10 @@ func (sm *Manager) startBGP() error { log.Error(err) } - // We're using Packet with BGP, popuplate the Peer information from the API + // We're using Equinix Metal with BGP, populate the Peer information from the API if sm.config.EnableBGP { - log.Infoln("Looking up the BGP configuration from packet") - err = packet.BGPLookup(packetClient, sm.config) + log.Infoln("Looking up the BGP configuration from Equinix Metal") + err = equinixmetal.BGPLookup(packetClient, sm.config) if err != nil { log.Error(err) } @@ -48,10 +51,27 @@ func (sm *Manager) startBGP() error { } log.Info("Starting the BGP server to advertise VIP routes to BGP peers") - sm.bgpServer, err = bgp.NewBGPServer(&sm.config.BGPConfig) + sm.bgpServer, err = bgp.NewBGPServer(&sm.config.BGPConfig, func(p *api.WatchEventResponse_PeerEvent) { + ipaddr := p.GetPeer().GetState().GetNeighborAddress() + port := uint64(179) + peerDescription := fmt.Sprintf("%s:%d", ipaddr, port) + + for stateName, stateValue := range api.PeerState_SessionState_value { + metricValue := 0.0 + if stateValue == int32(p.GetPeer().GetState().GetSessionState().Number()) { + metricValue = 1 + } + + sm.bgpSessionInfoGauge.With(prometheus.Labels{ + "state": stateName, + "peer": peerDescription, + }).Set(metricValue) + } + }) if err != nil { return err } + // use a Go context so we can tell the leaderelection 
code when we // want to step down ctx, cancel := context.WithCancel(context.Background()) @@ -68,7 +88,7 @@ func (sm *Manager) startBGP() error { go func() { <-sm.signalChan log.Info("Received termination, signaling shutdown") - if sm.config.EnableControlPane { + if sm.config.EnableControlPlane { if cpCluster != nil { cpCluster.Stop() } @@ -77,22 +97,25 @@ func (sm *Manager) startBGP() error { cancel() }() - if sm.config.EnableControlPane { - + if sm.config.EnableControlPlane { cpCluster, err = cluster.InitCluster(sm.config, false) if err != nil { return err } - clusterManager := &cluster.Manager{ - KubernetesClient: sm.clientSet, - SignalChan: sm.signalChan, + clusterManager, err := initClusterManager(sm) + if err != nil { + return err } go func() { - err = cpCluster.StartVipService(sm.config, clusterManager, sm.bgpServer, packetClient) + if sm.config.EnableLeaderElection { + err = cpCluster.StartCluster(sm.config, clusterManager, sm.bgpServer) + } else { + err = cpCluster.StartVipService(sm.config, clusterManager, sm.bgpServer, packetClient) + } if err != nil { - log.Errorf("Control Pane Error [%v]", err) + log.Errorf("Control Plane Error [%v]", err) // Trigger the shutdown of this manager instance sm.signalChan <- syscall.SIGINT } @@ -107,7 +130,7 @@ func (sm *Manager) startBGP() error { } } - err = sm.servicesWatcher(ctx) + err = sm.servicesWatcher(ctx, sm.syncServices) if err != nil { return err } diff --git a/pkg/manager/manager_table.go b/pkg/manager/manager_table.go new file mode 100644 index 00000000..eb16b11f --- /dev/null +++ b/pkg/manager/manager_table.go @@ -0,0 +1,110 @@ +package manager + +import ( + "context" + "os" + "time" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startTableMode() error { + var ns string + var err error + + id, err := os.Hostname() + if err != nil { + return err + } + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log.Infof("all routing table entries will exist in table [%d]", sm.config.RoutingTableID) + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received termination, signaling shutdown") + + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + ns, err = returnNameSpace() + if err != nil { + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace + } + + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". 
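+		// Each kube-vip instance competes for this lock using its hostname as the
+		// identity; the resulting Lease object can be inspected with, for example,
+		// `kubectl -n <namespace> get lease`.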
+ lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: plunderLock, + Namespace: ns, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. + ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for x := range sm.serviceInstances { + sm.serviceInstances[x].cluster.Stop() + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } + return nil +} diff --git a/pkg/manager/manager_wireguard.go b/pkg/manager/manager_wireguard.go new file mode 100644 index 00000000..a17c2c97 --- /dev/null +++ b/pkg/manager/manager_wireguard.go @@ -0,0 +1,142 @@ +package manager + +import ( + "context" + "os" + "strconv" + "time" + + "github.com/kamhlos/upnp" + "github.com/kube-vip/kube-vip/pkg/wireguard" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startWireguard() error { + var ns string + var err error + + id, err := os.Hostname() + if err != nil { + return err + } + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log.Infoln("reading wireguard peer configuration from Kubernetes secret") + s, err := sm.clientSet.CoreV1().Secrets(sm.config.Namespace).Get(ctx, "wireguard", metav1.GetOptions{}) + if err != nil { + return err + } + // parse all the details needed for Wireguard + peerPublicKey := s.Data["peerPublicKey"] + peerEndpoint := s.Data["peerEndpoint"] + privateKey := s.Data["privateKey"] + + // Configure the interface to join the Wireguard VPN + err = wireguard.ConfigureInterface(string(privateKey), string(peerPublicKey), string(peerEndpoint)) + if err != nil { + return err + } + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received termination, signaling shutdown") + + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + ns, err = returnNameSpace() + if err != nil { + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace + } + + // Before starting the leader Election enable any additional 
functionality + upnpEnabled, _ := strconv.ParseBool(os.Getenv("enableUPNP")) + + if upnpEnabled { + sm.upnp = new(upnp.Upnp) + err := sm.upnp.ExternalIPAddr() + if err != nil { + log.Errorf("Error Enabling UPNP %s", err.Error()) + // Set the struct to nil so nothing should use it in future + sm.upnp = nil + } else { + log.Infof("Successfully enabled UPNP, Gateway address [%s]", sm.upnp.GatewayOutsideIP) + } + } + + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: plunderLock, + Namespace: ns, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. + ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for x := range sm.serviceInstances { + sm.serviceInstances[x].cluster.Stop() + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } + return nil +} diff --git a/pkg/manager/node_labeling.go b/pkg/manager/node_labeling.go new file mode 100644 index 00000000..610cdfc0 --- /dev/null +++ b/pkg/manager/node_labeling.go @@ -0,0 +1,73 @@ +package manager + +import ( + "context" + "encoding/json" + "fmt" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" +) + +const ( + nodeLabelIndex = "kube-vip.io/has-ip" + nodeLabelJSONPath = `kube-vip.io~1has-ip` +) + +type patchStringLabel struct { + Op string `json:"op"` + Path string `json:"path"` + Value string `json:"value"` +} + +// applyNodeLabel add/remove node label `kube-vip.io/has-ip=` to/from +// the node where the virtual IP was added to/removed from. 
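+// For example, while a node holds the virtual IP it may carry a label such as
+// `kube-vip.io/has-ip=192.168.0.100` (the address value here is illustrative).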
+func applyNodeLabel(clientSet *kubernetes.Clientset, address, id, identity string) { + ctx := context.Background() + node, err := clientSet.CoreV1().Nodes().Get(ctx, id, metav1.GetOptions{}) + if err != nil { + log.Errorf("can't query node %s labels. error: %v", id, err) + return + } + + log.Debugf("node %s labels: %+v", id, node.Labels) + + value, ok := node.Labels[nodeLabelIndex] + path := fmt.Sprintf("/metadata/labels/%s", nodeLabelJSONPath) + if (!ok || value != address) && id == identity { + log.Debugf("setting node label `has-ip=%s` on %s", address, id) + // Append label + applyPatchLabels(ctx, clientSet, id, "add", path, address) + } else if ok && value == address { + log.Debugf("removing node label `has-ip=%s` on %s", address, id) + // Remove label + applyPatchLabels(ctx, clientSet, id, "remove", path, address) + } else { + log.Debugf("no node label change needed") + } +} + +// applyPatchLabels add/remove node labels +func applyPatchLabels(ctx context.Context, clientSet *kubernetes.Clientset, + name, operation, path, value string) { + patchLabels := []patchStringLabel{{ + Op: operation, + Path: path, + Value: value, + }} + patchData, err := json.Marshal(patchLabels) + if err != nil { + log.Errorf("node patch marshaling failed. error: %v", err) + return + } + // patch node + node, err := clientSet.CoreV1().Nodes().Patch(ctx, + name, types.JSONPatchType, patchData, metav1.PatchOptions{}) + if err != nil { + log.Errorf("can't patch node %s. error: %v", name, err) + return + } + log.Debugf("updated node %s labels: %+v", name, node.Labels) +} diff --git a/pkg/manager/prom.go b/pkg/manager/prom.go index 3020484a..9683411a 100644 --- a/pkg/manager/prom.go +++ b/pkg/manager/prom.go @@ -4,5 +4,5 @@ import "github.com/prometheus/client_golang/prometheus" // PrometheusCollector defines a service watch event counter. 
func (sm *Manager) PrometheusCollector() []prometheus.Collector { - return []prometheus.Collector{sm.countServiceWatchEvent} + return []prometheus.Collector{sm.countServiceWatchEvent, sm.bgpSessionInfoGauge} } diff --git a/pkg/manager/service_egress.go b/pkg/manager/service_egress.go new file mode 100644 index 00000000..c0c0dfdb --- /dev/null +++ b/pkg/manager/service_egress.go @@ -0,0 +1,207 @@ +package manager + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "github.com/kube-vip/kube-vip/pkg/vip" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DEBUG +const ( + defaultPodCIDR = "10.0.0.0/16" + defaultServiceCIDR = "10.96.0.0/12" +) + +func (sm *Manager) iptablesCheck() error { + file, err := os.Open("/proc/modules") + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + var nat, filter, mangle bool + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + switch line[0] { + case "iptable_filter": + filter = true + case "iptable_nat": + nat = true + case "iptable_mangle": + mangle = true + } + } + + if !filter || !nat || !mangle { + return fmt.Errorf("missing iptables modules -> nat [%t] -> filter [%t] mangle -> [%t]", nat, filter, mangle) + } + return nil +} + +func (sm *Manager) configureEgress(vipIP, podIP, destinationPorts, namespace string) error { + // serviceCIDR, podCIDR, err := sm.AutoDiscoverCIDRs() + // if err != nil { + // serviceCIDR = "10.96.0.0/12" + // podCIDR = "10.0.0.0/16" + // } + + var podCidr, serviceCidr string + + if sm.config.EgressPodCidr != "" { + podCidr = sm.config.EgressPodCidr + } else { + podCidr = defaultPodCIDR + } + + if sm.config.EgressServiceCidr != "" { + serviceCidr = sm.config.EgressServiceCidr + } else { + serviceCidr = defaultServiceCIDR + } + + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, namespace) + if err != nil { + return fmt.Errorf("error Creating iptables client [%s]", err) + } + + // Check if the kube-vip mangle chain exists, if not create it + exists, err := i.CheckMangleChain(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error checking for existence of mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + if !exists { + err = i.CreateMangleChain(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error creating mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + } + err = i.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, podCidr) + if err != nil { + return fmt.Errorf("error adding rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + err = i.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, serviceCidr) + if err != nil { + return fmt.Errorf("error adding rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + err = i.AppendReturnRulesForMarking(vip.MangleChainName, podIP+"/32") + if err != nil { + return fmt.Errorf("error adding marking rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + + err = i.InsertMangeTableIntoPrerouting(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error adding prerouting mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + + if destinationPorts != "" { + + fixedPorts := strings.Split(destinationPorts, ",") + + for _, fixedPort := range fixedPorts { + var proto, port string + + data := strings.Split(fixedPort, ":") + if len(data) == 0 { + continue + } else if len(data) == 1 { + proto = "tcp" + port = data[0] + } else { + proto = data[0] + port = data[1] + } + + err = 
i.InsertSourceNatForDestinationPort(vipIP, podIP, port, proto) + if err != nil { + return fmt.Errorf("error adding snat rules to nat chain [%s], error [%s]", vip.MangleChainName, err) + } + + } + } else { + err = i.InsertSourceNat(vipIP, podIP) + if err != nil { + return fmt.Errorf("error adding snat rules to nat chain [%s], error [%s]", vip.MangleChainName, err) + } + } + //_ = i.DumpChain(vip.MangleChainName) + err = vip.DeleteExistingSessions(podIP, false) + if err != nil { + return err + } + + return nil +} + +func (sm *Manager) AutoDiscoverCIDRs() (serviceCIDR, podCIDR string, err error) { + pod, err := sm.clientSet.CoreV1().Pods("kube-system").Get(context.TODO(), "kube-controller-manager", v1.GetOptions{}) + if err != nil { + return "", "", err + } + for flags := range pod.Spec.Containers[0].Command { + if strings.Contains(pod.Spec.Containers[0].Command[flags], "--cluster-cidr=") { + podCIDR = strings.ReplaceAll(pod.Spec.Containers[0].Command[flags], "--cluster-cidr=", "") + } + if strings.Contains(pod.Spec.Containers[0].Command[flags], "--service-cluster-ip-range=") { + serviceCIDR = strings.ReplaceAll(pod.Spec.Containers[0].Command[flags], "--service-cluster-ip-range=", "") + } + } + if podCIDR == "" || serviceCIDR == "" { + err = fmt.Errorf("unable to fully determine cluster CIDR configurations") + } + + return +} + +func (sm *Manager) TeardownEgress(podIP, vipIP, destinationPorts, namespace string) error { + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, namespace) + if err != nil { + return fmt.Errorf("error Creating iptables client [%s]", err) + } + + // Remove the marking of egress packets + err = i.DeleteMangleMarking(podIP, vip.MangleChainName) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + + // Clear up SNAT rules + if destinationPorts != "" { + fixedPorts := strings.Split(destinationPorts, ",") + + for _, fixedPort := range fixedPorts { + var proto, port string + + data := strings.Split(fixedPort, ":") + if len(data) == 0 { + continue + } else if len(data) == 1 { + proto = "tcp" + port = data[0] + } else { + proto = data[0] + port = data[1] + } + + err = i.DeleteSourceNatForDestinationPort(podIP, vipIP, port, proto) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + + } + } else { + err = i.DeleteSourceNat(podIP, vipIP) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + } + err = vip.DeleteExistingSessions(podIP, false) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + return nil +} diff --git a/pkg/manager/services.go b/pkg/manager/services.go index 5e039428..f0d3dbfe 100644 --- a/pkg/manager/services.go +++ b/pkg/manager/services.go @@ -3,71 +3,228 @@ package manager import ( "context" "fmt" + "os" "strings" "sync" + "time" log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" - "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/vip" ) const ( - hwAddrKey = "kube-vip.io/hwaddr" - requestedIP = "kube-vip.io/requestedIP" + hwAddrKey = "kube-vip.io/hwaddr" + requestedIP = "kube-vip.io/requestedIP" + vipHost = "kube-vip.io/vipHost" + egress = "kube-vip.io/egress" + egressDestinationPorts = "kube-vip.io/egress-destination-ports" + egressSourcePorts = "kube-vip.io/egress-source-ports" + 
endpoint = "kube-vip.io/active-endpoint" + flushContrack = "kube-vip.io/flush-conntrack" + loadbalancerIPAnnotation = "kube-vip.io/loadbalancerIPs" + loadbalancerHostname = "kube-vip.io/loadbalancerHostname" ) -func (sm *Manager) stopService(uid string) error { - found := false +func (sm *Manager) syncServices(_ context.Context, svc *v1.Service, wg *sync.WaitGroup) error { + defer wg.Done() + + log.Debugf("[STARTING] Service Sync") + + // Iterate through the synchronising services + foundInstance := false + newServiceAddress := fetchServiceAddress(svc) + newServiceUID := string(svc.UID) + for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == uid { - found = true - sm.serviceInstances[x].cluster.Stop() + if sm.serviceInstances[x].UID == newServiceUID { + log.Debugf("isDHCP: %t, newServiceAddress: %s", sm.serviceInstances[x].isDHCP, newServiceAddress) + // If the found instance's DHCP configuration doesn't match the new service, delete it. + if (sm.serviceInstances[x].isDHCP && newServiceAddress != "0.0.0.0") || + (!sm.serviceInstances[x].isDHCP && newServiceAddress == "0.0.0.0") || + (!sm.serviceInstances[x].isDHCP && len(svc.Status.LoadBalancer.Ingress) > 0 && + newServiceAddress != svc.Status.LoadBalancer.Ingress[0].IP) || + (len(svc.Status.LoadBalancer.Ingress) > 0 && !comparePortsAndPortStatuses(svc)) || + (sm.serviceInstances[x].isDHCP && len(svc.Status.LoadBalancer.Ingress) > 0 && + sm.serviceInstances[x].dhcpInterfaceIP != svc.Status.LoadBalancer.Ingress[0].IP) { + if err := sm.deleteService(newServiceUID); err != nil { + return err + } + break + } + foundInstance = true } } - if !found { - return fmt.Errorf("unable to find/stop service [%s]", uid) + + // This instance wasn't found, we need to add it to the manager + if !foundInstance && newServiceAddress != "" { + if err := sm.addService(svc); err != nil { + return err + } + } + + return nil +} + +func comparePortsAndPortStatuses(svc *v1.Service) bool { + portsStatus := svc.Status.LoadBalancer.Ingress[0].Ports + if len(portsStatus) != len(svc.Spec.Ports) { + return false + } + for i, portSpec := range svc.Spec.Ports { + if portsStatus[i].Port != portSpec.Port || portsStatus[i].Protocol != portSpec.Protocol { + return false + } + } + return true +} + +func (sm *Manager) addService(svc *v1.Service) error { + startTime := time.Now() + + newService, err := NewInstance(svc, sm.config) + if err != nil { + return err + } + + log.Infof("(svcs) adding VIP [%s] for [%s/%s]", newService.Vip, newService.serviceSnapshot.Namespace, newService.serviceSnapshot.Name) + + newService.cluster.StartLoadBalancerService(newService.vipConfig, sm.bgpServer) + + sm.upnpMap(newService) + + if newService.isDHCP { + go func() { + for ip := range newService.dhcpClient.IPChannel() { + log.Debugf("IP %s may have changed", ip) + newService.vipConfig.VIP = ip + newService.dhcpInterfaceIP = ip + if err := sm.updateStatus(newService); err != nil { + log.Warnf("error updating svc: %s", err) + } + } + log.Debugf("IP update channel closed, stopping") + }() + } + + sm.serviceInstances = append(sm.serviceInstances, newService) + + if err := sm.updateStatus(newService); err != nil { + // delete service to collect garbage + if deleteErr := sm.deleteService(newService.UID); err != nil { + return deleteErr + } + return err + } + + serviceIP := fetchServiceAddress(svc) + + // Check if we need to flush any conntrack connections (due to some dangling conntrack connections) + if svc.Annotations[flushContrack] == "true" { + log.Debugf("Flushing conntrack rules for 
service [%s]", svc.Name) + err = vip.DeleteExistingSessions(serviceIP, false) + if err != nil { + log.Errorf("Error flushing any remaining egress connections [%s]", err) + } + err = vip.DeleteExistingSessions(serviceIP, true) + if err != nil { + log.Errorf("Error flushing any remaining ingress connections [%s]", err) + } + } + + // Check if egress is enabled on the service, if so we'll need to configure some rules + if svc.Annotations[egress] == "true" { + log.Debugf("Enabling egress for the service [%s]", svc.Name) + if svc.Annotations[endpoint] != "" { + // We will need to modify the iptables rules + err = sm.iptablesCheck() + if err != nil { + log.Errorf("Error configuring egress for loadbalancer [%s]", err) + } + err = sm.configureEgress(serviceIP, svc.Annotations[endpoint], svc.Annotations[egressDestinationPorts], svc.Namespace) + if err != nil { + log.Errorf("Error configuring egress for loadbalancer [%s]", err) + } else { + err = sm.updateServiceEndpointAnnotation(svc.Annotations[endpoint], svc) + if err != nil { + log.Errorf("Error configuring egress annotation for loadbalancer [%s]", err) + } + } + } } + finishTime := time.Since(startTime) + log.Infof("[service] synchronised in %dms", finishTime.Milliseconds()) + return nil } func (sm *Manager) deleteService(uid string) error { - var updatedInstances []Instance + // protect multiple calls + sm.mutex.Lock() + defer sm.mutex.Unlock() + + var updatedInstances []*Instance + var serviceInstance *Instance found := false for x := range sm.serviceInstances { + log.Debugf("Looking for [%s], found [%s]", uid, sm.serviceInstances[x].UID) // Add the running services to the new array if sm.serviceInstances[x].UID != uid { updatedInstances = append(updatedInstances, sm.serviceInstances[x]) } else { // Flip the found when we match found = true - if sm.serviceInstances[x].isDHCP { - sm.serviceInstances[x].dhcpClient.Stop() - macvlan, err := netlink.LinkByName(sm.serviceInstances[x].dhcpInterface) - if err != nil { - return fmt.Errorf("error finding VIP Interface: %v", err) - } + serviceInstance = sm.serviceInstances[x] + } + } + // If we've been through all services and not found the correct one then error + if !found { + // TODO: - fix UX + // return fmt.Errorf("unable to find/stop service [%s]", uid) + return nil + } + shared := false + for x := range updatedInstances { + if updatedInstances[x].Vip == serviceInstance.Vip { + shared = true + } + } + if !shared { + serviceInstance.cluster.Stop() + if serviceInstance.isDHCP { + serviceInstance.dhcpClient.Stop() + macvlan, err := netlink.LinkByName(serviceInstance.dhcpInterface) + if err != nil { + return fmt.Errorf("error finding VIP Interface: %v", err) + } + + err = netlink.LinkDel(macvlan) + if err != nil { + return fmt.Errorf("error deleting DHCP Link : %v", err) + } + } + if serviceInstance.vipConfig.EnableBGP { + cidrVip := fmt.Sprintf("%s/%s", serviceInstance.vipConfig.VIP, serviceInstance.vipConfig.VIPCIDR) + err := sm.bgpServer.DelHost(cidrVip) + return err + } + + // We will need to tear down the egress + if serviceInstance.serviceSnapshot.Annotations[egress] == "true" { + if serviceInstance.serviceSnapshot.Annotations[endpoint] != "" { - err = netlink.LinkDel(macvlan) + log.Infof("service [%s] has an egress re-write enabled", serviceInstance.serviceSnapshot.Name) + err := sm.TeardownEgress(serviceInstance.serviceSnapshot.Annotations[endpoint], serviceInstance.serviceSnapshot.Spec.LoadBalancerIP, serviceInstance.serviceSnapshot.Annotations[egressDestinationPorts], 
serviceInstance.serviceSnapshot.Namespace) if err != nil { - return fmt.Errorf("error deleting DHCP Link : %v", err) + log.Errorf("%v", err) } - - } - if sm.serviceInstances[x].vipConfig.EnableBGP { - cidrVip := fmt.Sprintf("%s/%s", sm.serviceInstances[x].vipConfig.VIP, sm.serviceInstances[x].vipConfig.VIPCIDR) - err := sm.bgpServer.DelHost(cidrVip) - return err } } } - // If we've been through all services and not found the correct one then error - if !found { - return fmt.Errorf("unable to find/stop service [%s]", uid) - } // Update the service array sm.serviceInstances = updatedInstances @@ -77,123 +234,88 @@ func (sm *Manager) deleteService(uid string) error { return nil } -func (sm *Manager) syncServices(service *v1.Service, wg *sync.WaitGroup) error { - defer wg.Done() - - log.Debugf("[STARTING] Service Sync") - // Iterate through the synchronising services - foundInstance := false - newServiceAddress := service.Spec.LoadBalancerIP - newServiceUID := string(service.UID) - - for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == newServiceUID { - // We have found this instance in the manager, we can determine if it needs updating - foundInstance = true +func (sm *Manager) upnpMap(s *Instance) { + // If upnp is enabled then update the gateway/router with the address + // TODO - work out if we need to mapping.Reclaim() + if sm.upnp != nil { + log.Infof("[UPNP] Adding map to [%s:%d - %s]", s.Vip, s.Port, s.serviceSnapshot.Name) + if err := sm.upnp.AddPortMapping(int(s.Port), int(s.Port), 0, s.Vip, strings.ToUpper(s.Type), s.serviceSnapshot.Name); err == nil { + log.Infof("service should be accessible externally on port [%d]", s.Port) + } else { + sm.upnp.Reclaim() + log.Errorf("unable to map port to gateway [%s]", err.Error()) } - - } - - // Detect if we're using a specific interface for services - var serviceInterface string - if sm.config.ServicesInterface != "" { - serviceInterface = sm.config.ServicesInterface - } else { - serviceInterface = sm.config.Interface - } - - // Generate new Virtual IP configuration - newVip := kubevip.Config{ - VIP: newServiceAddress, //TODO support more than one vip? 
- Interface: serviceInterface, - SingleNode: true, - EnableARP: sm.config.EnableARP, - EnableBGP: sm.config.EnableBGP, - VIPCIDR: sm.config.VIPCIDR, } +} - // This instance wasn't found, we need to add it to the manager - if !foundInstance { - // Create new service - var newService Instance - newService.UID = newServiceUID - newService.Vip = newServiceAddress - newService.Type = string(service.Spec.Ports[0].Protocol) //TODO - support multiple port types - newService.Port = service.Spec.Ports[0].Port - newService.ServiceName = service.Name - newService.dhcpInterfaceHwaddr = service.Annotations[hwAddrKey] - newService.dhcpInterfaceIP = service.Annotations[requestedIP] - - // If this was purposely created with the address 0.0.0.0 then we will create a macvlan on the main interface and try DHCP - if newServiceAddress == "0.0.0.0" { - err := sm.createDHCPService(newServiceUID, &newVip, &newService, service) - if err != nil { - return err - } - return nil +func (sm *Manager) updateStatus(i *Instance) error { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Deployment before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + currentService, err := sm.clientSet.CoreV1().Services(i.serviceSnapshot.Namespace).Get(context.TODO(), i.serviceSnapshot.Name, metav1.GetOptions{}) + if err != nil { + return err } - log.Infof("New VIP [%s] for [%s/%s] ", newService.Vip, newService.ServiceName, newService.UID) - - // Generate Load Balancer config - newLB := kubevip.LoadBalancer{ - Name: fmt.Sprintf("%s-load-balancer", newService.ServiceName), - Port: int(newService.Port), - Type: newService.Type, - BindToVip: true, + id, err := os.Hostname() + if err != nil { + return err } - // Add Load Balancer Configuration - newVip.LoadBalancers = append(newVip.LoadBalancers, newLB) + currentServiceCopy := currentService.DeepCopy() + if currentServiceCopy.Annotations == nil { + currentServiceCopy.Annotations = make(map[string]string) + } - // Create Add configuration to the new service - newService.vipConfig = newVip + // If we're using ARP then we can only broadcast the VIP from one place, add an annotation to the service + if sm.config.EnableARP { + // Add the current host + currentServiceCopy.Annotations[vipHost] = id + } + if i.dhcpInterfaceHwaddr != "" || i.dhcpInterfaceIP != "" { + currentServiceCopy.Annotations[hwAddrKey] = i.dhcpInterfaceHwaddr + currentServiceCopy.Annotations[requestedIP] = i.dhcpInterfaceIP + } - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) + updatedService, err := sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", newService.ServiceName, newService.UID) + log.Errorf("Error updating Service Spec [%s] : %v", i.serviceSnapshot.Name, err) return err } - err = c.StartLoadBalancerService(&newService.vipConfig, sm.bgpServer) + + ports := make([]v1.PortStatus, 0, len(i.serviceSnapshot.Spec.Ports)) + for _, port := range i.serviceSnapshot.Spec.Ports { + ports = append(ports, v1.PortStatus{ + Port: port.Port, + Protocol: port.Protocol, + }) + } + updatedService.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{ + IP: i.vipConfig.VIP, + Ports: ports, + }} + _, err = sm.clientSet.CoreV1().Services(updatedService.Namespace).UpdateStatus(context.TODO(), updatedService, metav1.UpdateOptions{}) if err != nil { - 
log.Errorf("Failed to add Service [%s] / [%s]", newService.ServiceName, newService.UID) + log.Errorf("Error updating Service %s/%s Status: %v", i.serviceSnapshot.Namespace, i.serviceSnapshot.Name, err) return err } + return nil + }) - sm.upnpMap(newService) - - newService.cluster = *c - - // Begin watching this service - // TODO - we may need this - // go sm.serviceWatcher(&newService, sm.config.Namespace) - - // Update the "Status" of the LoadBalancer (one or many may do this), as long as one does it - service.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: newVip.VIP}} - _, err = sm.clientSet.CoreV1().Services(service.Namespace).UpdateStatus(context.TODO(), service, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service [%s] Status: %v", newService.ServiceName, err) - } - sm.serviceInstances = append(sm.serviceInstances, newService) + if retryErr != nil { + log.Errorf("Failed to set Services: %v", retryErr) + return retryErr } - - log.Debugf("[COMPLETE] Service Sync") - return nil } -func (sm *Manager) upnpMap(s Instance) { - // If upnp is enabled then update the gateway/router with the address - // TODO - work out if we need to mapping.Reclaim() - if sm.upnp != nil { - - log.Infof("[UPNP] Adding map to [%s:%d - %s]", s.Vip, s.Port, s.ServiceName) - if err := sm.upnp.AddPortMapping(int(s.Port), int(s.Port), 0, s.Vip, strings.ToUpper(s.Type), s.ServiceName); err == nil { - log.Infof("Service should be accessible externally on port [%d]", s.Port) - } else { - sm.upnp.Reclaim() - log.Errorf("Unable to map port to gateway [%s]", err.Error()) +// fetchServiceAddress tries to get the address from annotations +// kube-vip.io/loadbalancerIPs, then from spec.loadbalancerIP +func fetchServiceAddress(s *v1.Service) string { + if s.Annotations != nil { + if v, ok := s.Annotations[loadbalancerIPAnnotation]; ok { + return v } } + return s.Spec.LoadBalancerIP } diff --git a/pkg/manager/servicesLeader.go b/pkg/manager/servicesLeader.go new file mode 100644 index 00000000..1328439a --- /dev/null +++ b/pkg/manager/servicesLeader.go @@ -0,0 +1,106 @@ +package manager + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// The startServicesWatchForLeaderElection function will start a services watcher, the +func (sm *Manager) startServicesWatchForLeaderElection(ctx context.Context) error { + + err := sm.servicesWatcher(ctx, sm.StartServicesLeaderElection) + if err != nil { + return err + } + + for x := range sm.serviceInstances { + sm.serviceInstances[x].cluster.Stop() + } + + log.Infof("Shutting down kube-Vip") + + return nil +} + +// The startServicesWatchForLeaderElection function will start a services watcher, the +func (sm *Manager) StartServicesLeaderElection(ctx context.Context, service *v1.Service, wg *sync.WaitGroup) error { + + id, err := os.Hostname() + if err != nil { + return err + } + + serviceLease := fmt.Sprintf("kubevip-%s", service.Name) + log.Infof("(svc election) service [%s], namespace [%s], lock name [%s], host id [%s]", service.Name, service.Namespace, serviceLease, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". 
+ lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: serviceLease, + Namespace: service.Namespace, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + activeService[string(service.UID)] = true + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. + ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + // Mark this service as active (as we've started leading) + // we run this in background as it's blocking + wg.Add(1) + go func() { + if err := sm.syncServices(ctx, service, wg); err != nil { + log.Errorln(err) + } + }() + + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("(svc election) service [%s] leader lost: [%s]", service.Name, id) + if activeService[string(service.UID)] { + if err := sm.deleteService(string(service.UID)); err != nil { + log.Errorln(err) + } + } + // Mark this service is inactive + activeService[string(service.UID)] = false + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("(svc election) new leader elected: %s", identity) + }, + }, + }) + log.Infof("(svc election) for service [%s] stopping", service.Name) + return nil +} diff --git a/pkg/manager/services_dhcp.go b/pkg/manager/services_dhcp.go deleted file mode 100644 index 87ef6430..00000000 --- a/pkg/manager/services_dhcp.go +++ /dev/null @@ -1,145 +0,0 @@ -package manager - -import ( - "context" - "fmt" - "net" - - "github.com/insomniacslk/dhcp/dhcpv4/nclient4" - log "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - - "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/kubevip" - "github.com/kube-vip/kube-vip/pkg/vip" -) - -func (sm *Manager) createDHCPService(newServiceUID string, newVip *kubevip.Config, newService *Instance, service *v1.Service) error { - parent, err := netlink.LinkByName(sm.config.Interface) - if err != nil { - return fmt.Errorf("Error finding VIP Interface, for building DHCP Link : %v", err) - } - - // Generate name from UID - interfaceName := fmt.Sprintf("vip-%s", newServiceUID[0:8]) - - // Check if the interface doesn't exist first - iface, err := net.InterfaceByName(interfaceName) - if err != nil { - log.Infof("Creating new macvlan interface for DHCP [%s]", interfaceName) - - hwaddr, err := net.ParseMAC(newService.dhcpInterfaceHwaddr) - if newService.dhcpInterfaceHwaddr != "" && err != nil { - return err - } - - mac := &netlink.Macvlan{ - LinkAttrs: netlink.LinkAttrs{ - Name: interfaceName, - ParentIndex: parent.Attrs().Index, - HardwareAddr: hwaddr, - }, - Mode: netlink.MACVLAN_MODE_DEFAULT, - } - - err = netlink.LinkAdd(mac) - if err != nil { - return 
fmt.Errorf("Could not add %s: %v", interfaceName, err) - } - - err = netlink.LinkSetUp(mac) - if err != nil { - return fmt.Errorf("Could not bring up interface [%s] : %v", interfaceName, err) - } - iface, err = net.InterfaceByName(interfaceName) - if err != nil { - return fmt.Errorf("Error finding new DHCP interface by name [%v]", err) - } - } else { - log.Infof("Using existing macvlan interface for DHCP [%s]", interfaceName) - } - - var initRebootFlag bool - if newService.dhcpInterfaceHwaddr != "" { - initRebootFlag = true - } - - client := vip.NewDHCPClient(iface, initRebootFlag, newService.dhcpInterfaceIP, func(lease *nclient4.Lease) { - newVip.VIP = lease.ACK.YourIPAddr.String() - - log.Infof("DHCP VIP [%s] for [%s/%s] ", newVip.VIP, newService.ServiceName, newServiceUID) - - // Create Add configuration to the new service - newService.vipConfig = *newVip - - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]: %v", newService.ServiceName, newService.UID, err) - return - } - err = c.StartLoadBalancerService(&newService.vipConfig, sm.bgpServer) - if err != nil { - log.Errorf("Failed to add Load Balancer service Service [%s] / [%s]: %v", newService.ServiceName, newService.UID, err) - return - } - newService.cluster = *c - - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of Deployment before attempting update - // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver - currentService, err := sm.clientSet.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - currentServiceCopy := currentService.DeepCopy() - if currentServiceCopy.Annotations == nil { - currentServiceCopy.Annotations = make(map[string]string) - } - currentServiceCopy.Annotations[hwAddrKey] = iface.HardwareAddr.String() - currentServiceCopy.Annotations[requestedIP] = newVip.VIP - updatedService, err := sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service Spec [%s] : %v", newService.ServiceName, err) - return err - } - - updatedService.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: newVip.VIP}} - _, err = sm.clientSet.CoreV1().Services(updatedService.Namespace).UpdateStatus(context.TODO(), updatedService, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service [%s] Status: %v", newService.ServiceName, err) - return err - } - return nil - }) - - if retryErr != nil { - log.Errorf("Failed to set Services: %v", retryErr) - } - // Find an update our array - - for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == newServiceUID { - sm.serviceInstances[x] = *newService - } - } - sm.upnpMap(*newService) - }) - - // Set that DHCP is enabled - newService.isDHCP = true - // Set the name of the interface so that it can be removed on Service deletion - newService.dhcpInterface = interfaceName - // Add the client so that we can call it's stop function - newService.dhcpClient = client - - sm.serviceInstances = append(sm.serviceInstances, *newService) - - go client.Start() - - return nil -} diff --git a/pkg/manager/watcher.go b/pkg/manager/watch_annotations.go similarity index 57% rename from pkg/manager/watcher.go rename to pkg/manager/watch_annotations.go index 665f979d..93bc7f35 100644 --- a/pkg/manager/watcher.go +++ 
b/pkg/manager/watch_annotations.go @@ -5,12 +5,11 @@ import ( "encoding/base64" "fmt" "os" + "regexp" "strconv" "strings" - "sync" "github.com/kube-vip/kube-vip/pkg/bgp" - "github.com/prometheus/client_golang/prometheus" log "github.com/sirupsen/logrus" "github.com/davecgh/go-spew/spew" @@ -24,97 +23,6 @@ import ( "k8s.io/client-go/tools/cache" ) -// This file handles the watching of a services endpoints and updates a load balancers endpoint configurations accordingly -func (sm *Manager) servicesWatcher(ctx context.Context) error { - // Watch function - var wg sync.WaitGroup - - // Use a restartable watcher, as this should help in the event of etcd or timeout issues - rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return sm.clientSet.CoreV1().Services(v1.NamespaceAll).Watch(ctx, metav1.ListOptions{}) - }, - }) - if err != nil { - return fmt.Errorf("error creating services watcher: %s", err.Error()) - } - go func() { - <-sm.signalChan - // Cancel the context - rw.Stop() - }() - ch := rw.ResultChan() - //defer rw.Stop() - log.Infoln("Beginning watching services for type: LoadBalancer in all namespaces") - - for event := range ch { - sm.countServiceWatchEvent.With(prometheus.Labels{"type": string(event.Type)}).Add(1) - - // We need to inspect the event and get ResourceVersion out of it - switch event.Type { - case watch.Added, watch.Modified: - // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) - svc, ok := event.Object.(*v1.Service) - if !ok { - return fmt.Errorf("Unable to parse Kubernetes services from API watcher") - } - if svc.Annotations["kube-vip.io/ignore"] == "true" { - log.Infof("Service [%s] has an ignore annotation for kube-vip", svc.Name) - break - } - - if svc.Spec.LoadBalancerIP == "" { - log.Infof("Service [%s] has been addded/modified, it has no assigned external addresses", svc.Name) - } else { - log.Infof("Service [%s] has been addded/modified, it has an assigned external addresses [%s]", svc.Name, svc.Spec.LoadBalancerIP) - wg.Add(1) - err = sm.syncServices(svc, &wg) - if err != nil { - log.Error(err) - } - wg.Wait() - } - case watch.Deleted: - svc, ok := event.Object.(*v1.Service) - if !ok { - return fmt.Errorf("Unable to parse Kubernetes services from API watcher") - } - if svc.Annotations["kube-vip.io/ignore"] == "true" { - log.Infof("Service [%s] has an ignore annotation for kube-vip", svc.Name) - break - } - err = sm.stopService(string(svc.UID)) - if err != nil { - log.Error(err) - } - err = sm.deleteService(string(svc.UID)) - if err != nil { - log.Error(err) - } - log.Infof("Service [%s] has been deleted", svc.Name) - - case watch.Bookmark: - // Un-used - case watch.Error: - log.Error("Error attempting to watch Kubernetes services") - - // This round trip allows us to handle unstructured status - errObject := apierrors.FromObject(event.Object) - statusErr, ok := errObject.(*apierrors.StatusError) - if !ok { - log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) - - } - - status := statusErr.ErrStatus - log.Errorf("%v", status) - default: - } - } - log.Warnln("Stopping watching services for type: LoadBalancer in all namespaces") - return nil -} - // This file handles the watching of node annotations for configuration, it will exit once the annotations are // present func (sm *Manager) annotationsWatcher() error { @@ -162,15 +70,23 @@ func (sm *Manager) annotationsWatcher() error { return 
fmt.Errorf("error creating annotations watcher: %s", err.Error()) } + exitFunction := make(chan struct{}) go func() { - <-sm.signalChan - log.Info("Received termination, signaling shutdown") - // Cancel the context - rw.Stop() + select { + case <-sm.shutdownChan: + log.Debug("[annotations] shutdown called") + // Stop the retry watcher + rw.Stop() + return + case <-exitFunction: + log.Debug("[annotations] function ending") + // Stop the retry watcher + rw.Stop() + return + } }() ch := rw.ResultChan() - //defer rw.Stop() for event := range ch { // We need to inspect the event and get ResourceVersion out of it @@ -178,7 +94,7 @@ func (sm *Manager) annotationsWatcher() error { case watch.Added, watch.Modified: node, ok := event.Object.(*v1.Node) if !ok { - return fmt.Errorf("Unable to parse Kubernetes Node from Annotation watcher") + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") } bgpConfig, bgpPeer, err := parseBgpAnnotations(node, sm.config.Annotations) @@ -194,7 +110,7 @@ func (sm *Manager) annotationsWatcher() error { case watch.Deleted: node, ok := event.Object.(*v1.Node) if !ok { - return fmt.Errorf("Unable to parse Kubernetes Node from Kubernetes watcher") + return fmt.Errorf("unable to parse Kubernetes Node from Kubernetes watcher") } log.Infof("Node [%s] has been deleted", node.Name) @@ -217,7 +133,7 @@ func (sm *Manager) annotationsWatcher() error { default: } } - + close(exitFunction) log.Infoln("Exiting Annotations watcher") return nil @@ -226,11 +142,26 @@ func (sm *Manager) annotationsWatcher() error { // parseNodeAnnotations parses the annotations on the node and updates the configuration // returning an error if the annotations are not valid or missing; and nil if everything is OK // to continue +// +// The regex expression for each annotation ensures (at least in terms of annotations) backwards +// compatibility with the Equinix Metal annotation format changed in +// https://github.com/equinix/cloud-provider-equinix-metal/releases/tag/v3.3.0 +// +// "metal.equinix.com/`" --> "metal.equinix.com/bgp-peers-{{n}}-`" +// * `` is the relevant information, such as `node-asn` or `peer-ip` +// * `{{n}}` is the number of the peer, always starting with `0` +// * kube-vip is only designed to manage one peer, just look for {{n}} == 0 func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, error) { bgpConfig := bgp.Config{} bgpPeer := bgp.Peer{} - nodeASN := node.Annotations[fmt.Sprintf("%s/node-asn", prefix)] + nodeASN := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?node-asn", prefix)) + if regex.Match([]byte(k)) { + nodeASN = v + } + } if nodeASN == "" { return bgpConfig, bgpPeer, fmt.Errorf("node-asn value missing or empty") } @@ -242,7 +173,13 @@ func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, er bgpConfig.AS = uint32(u64) - srcIP := node.Annotations[fmt.Sprintf("%s/src-ip", prefix)] + srcIP := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?src-ip", prefix)) + if regex.Match([]byte(k)) { + srcIP = v + } + } if srcIP == "" { return bgpConfig, bgpPeer, fmt.Errorf("src-ip value missing or empty") } @@ -251,7 +188,13 @@ func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, er // Also set the BGP peering to the sourceIP bgpConfig.RouterID, bgpConfig.SourceIP = srcIP, srcIP - peerASN := node.Annotations[fmt.Sprintf("%s/peer-asn", prefix)] + peerASN := "" + for k, v := range 
node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?peer-asn", prefix)) + if regex.Match([]byte(k)) { + peerASN = v + } + } if peerASN == "" { return bgpConfig, bgpPeer, fmt.Errorf("peer-asn value missing or empty") } @@ -263,7 +206,14 @@ func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, er bgpPeer.AS = uint32(u64) - peerIPString := node.Annotations[fmt.Sprintf("%s/peer-ip", prefix)] + peerIPString := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-[0-9]+-)?peer-ip", prefix)) + if regex.Match([]byte(k)) { + peerIPString += v + "," + } + } + peerIPString = strings.TrimRight(peerIPString, ",") peerIPs := strings.Split(peerIPString, ",") @@ -273,7 +223,13 @@ func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, er if ipAddr != "" { bgpPeer.Address = ipAddr // Check if we're also expecting a password for this peer - base64BGPPassword := node.Annotations[fmt.Sprintf("%s/bgp-pass", prefix)] + base64BGPPassword := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?bgp-pass", prefix)) + if regex.Match([]byte(k)) { + base64BGPPassword = v + } + } if base64BGPPassword != "" { // Decode base64 encoded string decodedPassword, err := base64.StdEncoding.DecodeString(base64BGPPassword) @@ -287,8 +243,8 @@ func parseBgpAnnotations(node *v1.Node, prefix string) (bgp.Config, bgp.Peer, er } } - log.Debugf("BGPConfig: %v\n", bgpConfig) - log.Debugf("BGPPeerConfig: %v\n", bgpPeer) + //log.Debugf("BGPConfig: %v\n", bgpConfig) + //log.Debugf("BGPPeerConfig: %v\n", bgpPeer) return bgpConfig, bgpPeer, nil } diff --git a/pkg/manager/watch_endpoints.go b/pkg/manager/watch_endpoints.go new file mode 100644 index 00000000..fccb1411 --- /dev/null +++ b/pkg/manager/watch_endpoints.go @@ -0,0 +1,245 @@ +package manager + +import ( + "context" + "fmt" + "strings" + "sync" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" +) + +func (sm *Manager) watchEndpoint(ctx context.Context, id string, service *v1.Service, wg *sync.WaitGroup) error { + log.Infof("[endpoint] watching for service [%s] in namespace [%s]", service.Name, service.Namespace) + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + leaderContext, cancel := context.WithCancel(context.Background()) + var leaderElectionActive bool + defer cancel() + + opts := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", service.Name).String(), + } + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.CoreV1().Endpoints(service.Namespace).Watch(ctx, opts) + }, + }) + if err != nil { + cancel() + return fmt.Errorf("error creating endpoint watcher: %s", err.Error()) + } + + exitFunction := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + log.Debug("[endpoint] context cancelled") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + case <-sm.shutdownChan: + log.Debug("[endpoint] shutdown called") + // Stop the retry watcher + rw.Stop() + // Cancel the 
context, which will in turn cancel the leadership + cancel() + return + case <-exitFunction: + log.Debug("[endpoint] function ending") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + } + }() + + ch := rw.ResultChan() + + var lastKnownGoodEndpoint string + for event := range ch { + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + ep, ok := event.Object.(*v1.Endpoints) + if !ok { + cancel() + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + // Build endpoints + var localendpoints []string + for subset := range ep.Subsets { + for address := range ep.Subsets[subset].Addresses { + // 1. Compare the hostname on the endpoint to the hostname + // 2. Compare the nodename on the endpoint to the hostname + // 3. Drop the FQDN to a shortname and compare to the nodename on the endpoint + + // 1. Compare the Hostname first (should be FQDN) + if id == ep.Subsets[subset].Addresses[address].Hostname { + log.Debugf("[endpoint] address: %s, hostname: %s", ep.Subsets[subset].Addresses[address].IP, ep.Subsets[subset].Addresses[address].Hostname) + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } else { + // 2. Compare the Nodename (from testing could be FQDN or short) + if ep.Subsets[subset].Addresses[address].NodeName != nil { + log.Debugf("[endpoint] address: %s, hostname: %s, node: %s", ep.Subsets[subset].Addresses[address].IP, ep.Subsets[subset].Addresses[address].Hostname, *ep.Subsets[subset].Addresses[address].NodeName) + if id == *ep.Subsets[subset].Addresses[address].NodeName { + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } else { + // 3. 
Compare to shortname + shortname, err := getShortname(id) + if err != nil { + log.Errorf("[endpoint] %v", err) + } else { + log.Debugf("[endpoint] address: %s, shortname: %s, node: %s", ep.Subsets[subset].Addresses[address].IP, shortname, *ep.Subsets[subset].Addresses[address].NodeName) + + if shortname == *ep.Subsets[subset].Addresses[address].NodeName { + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } + } + } + } + } + } + } + + // Find out if we have any local endpoints + // if out endpoint is empty then populate it + // if not, go through the endpoints and see if ours still exists + // If we have a local endpoint then begin the leader Election, unless it's already running + // + + // Check that we have local endpoints + if len(localendpoints) != 0 { + // if we haven't populated one, then do so + if lastKnownGoodEndpoint != "" { + + // check out previous endpoint exists + stillExists := false + + for x := range localendpoints { + if localendpoints[x] == lastKnownGoodEndpoint { + stillExists = true + } + } + // If the last endpoint no longer exists, we cancel our leader Election + if !stillExists && leaderElectionActive { + log.Warnf("[endpoint] existing [%s] has been removed, restarting leaderElection", lastKnownGoodEndpoint) + // Stop the existing leaderElection + cancel() + // Set our active endpoint to an existing one + lastKnownGoodEndpoint = localendpoints[0] + // disable last leaderElection flag + leaderElectionActive = false + } + + } else { + lastKnownGoodEndpoint = localendpoints[0] + } + + // Set the service accordingly + if service.Annotations["kube-vip.io/egress"] == "true" { + service.Annotations["kube-vip.io/active-endpoint"] = lastKnownGoodEndpoint + } + + if !leaderElectionActive { + go func() { + leaderContext, cancel = context.WithCancel(context.Background()) + + // This is a blocking function, that will restart (in the event of failure) + for { + // if the context isn't cancelled restart + if leaderContext.Err() != context.Canceled { + leaderElectionActive = true + err = sm.StartServicesLeaderElection(leaderContext, service, wg) + if err != nil { + log.Error(err) + } + leaderElectionActive = false + } else { + leaderElectionActive = false + break + } + } + }() + } + } else { + // If there are no local endpoints, and we had one then remove it and stop the leaderElection + if lastKnownGoodEndpoint != "" { + log.Warnf("[endpoint] existing [%s] has been removed, no remaining endpoints for leaderElection", lastKnownGoodEndpoint) + lastKnownGoodEndpoint = "" // reset endpoint + cancel() // stop services watcher + leaderElectionActive = false + } + } + log.Debugf("[endpoint watcher] local endpoint(s) [%d], known good [%s], active election [%t]", len(localendpoints), lastKnownGoodEndpoint, leaderElectionActive) + + case watch.Deleted: + // Close the goroutine that will end the retry watcher, then exit the endpoint watcher function + close(exitFunction) + log.Infof("[endpoints] deleted stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + return nil + case watch.Error: + errObject := apierrors.FromObject(event.Object) + statusErr, _ := errObject.(*apierrors.StatusError) + log.Errorf("[endpoint] -> %v", statusErr) + } + } + close(exitFunction) + log.Infof("[endpoints] stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + return nil //nolint:govet +} + +func (sm *Manager) updateServiceEndpointAnnotation(endpoint string, service *v1.Service) error { + retryErr := 
retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Deployment before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + currentService, err := sm.clientSet.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + currentServiceCopy := currentService.DeepCopy() + if currentServiceCopy.Annotations == nil { + currentServiceCopy.Annotations = make(map[string]string) + } + + currentServiceCopy.Annotations["kube-vip.io/active-endpoint"] = endpoint + + _, err = sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) + if err != nil { + log.Errorf("Error updating Service Spec [%s] : %v", currentServiceCopy.Name, err) + return err + } + return nil + }) + + if retryErr != nil { + log.Errorf("Failed to set Services: %v", retryErr) + return retryErr + } + return nil +} + +// returns just the shortname (or first bit) of a FQDN +func getShortname(hostname string) (string, error) { + if len(hostname) == 0 { + return "", fmt.Errorf("unable to find shortname from %s", hostname) + } + hostParts := strings.Split(hostname, ".") + if len(hostParts) > 1 { + return hostParts[0], nil + } + return "", fmt.Errorf("unable to find shortname from %s", hostname) +} diff --git a/pkg/manager/watch_services.go b/pkg/manager/watch_services.go new file mode 100644 index 00000000..e086fb2e --- /dev/null +++ b/pkg/manager/watch_services.go @@ -0,0 +1,240 @@ +package manager + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/davecgh/go-spew/spew" + "github.com/kube-vip/kube-vip/pkg/vip" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" +) + +// TODO: Fix the naming of these contexts + +// activeServiceLoadBalancer keeps track of services that already have a leaderElection in place +var activeServiceLoadBalancer map[string]context.Context + +// activeServiceLoadBalancer keeps track of services that already have a leaderElection in place +var activeServiceLoadBalancerCancel map[string]func() + +// activeService keeps track of services that already have a leaderElection in place +var activeService map[string]bool + +// watchedService keeps track of services that are already being watched +var watchedService map[string]bool + +func init() { + // Set up the caches for monitoring existing active or watched services + activeServiceLoadBalancerCancel = make(map[string]func()) + activeServiceLoadBalancer = make(map[string]context.Context) + activeService = make(map[string]bool) + watchedService = make(map[string]bool) +} + +// This function handles the watching of a services endpoints and updates a load balancers endpoint configurations accordingly +func (sm *Manager) servicesWatcher(ctx context.Context, serviceFunc func(context.Context, *v1.Service, *sync.WaitGroup) error) error { + // Watch function + var wg sync.WaitGroup + + id, err := os.Hostname() + if err != nil { + return err + } + if sm.config.ServiceNamespace == "" { + // v1.NamespaceAll is actually "", but we'll stay with the const in case things change upstream + sm.config.ServiceNamespace = v1.NamespaceAll + log.Infof("(svcs) starting services watcher for all 
namespaces") + } else { + log.Infof("(svcs) starting services watcher for services in namespace [%s]", sm.config.ServiceNamespace) + } + + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.CoreV1().Services(sm.config.ServiceNamespace).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + return fmt.Errorf("error creating services watcher: %s", err.Error()) + } + exitFunction := make(chan struct{}) + go func() { + select { + case <-sm.shutdownChan: + log.Debug("(svcs) shutdown called") + // Stop the retry watcher + rw.Stop() + return + case <-exitFunction: + log.Debug("(svcs) function ending") + // Stop the retry watcher + rw.Stop() + return + } + }() + ch := rw.ResultChan() + + // Used for tracking an active endpoint / pod + for event := range ch { + sm.countServiceWatchEvent.With(prometheus.Labels{"type": string(event.Type)}).Add(1) + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + // We only care about LoadBalancer services + if svc.Spec.Type != v1.ServiceTypeLoadBalancer { + break + } + + // We only care about LoadBalancer services that have been allocated an address + if fetchServiceAddress(svc) == "" { + break + } + + // Check the loadBalancer class + if svc.Spec.LoadBalancerClass != nil { + // if this isn't nil then it has been configured, check if it the kube-vip loadBalancer class + if *svc.Spec.LoadBalancerClass != sm.config.LoadBalancerClassName { + log.Infof("(svcs) [%s] specified the loadBalancer class [%s], ignoring", svc.Name, *svc.Spec.LoadBalancerClass) + break + } + } else if sm.config.LoadBalancerClassOnly { + // if kube-vip is configured to only recognize services with kube-vip's lb class, then ignore the services without any lb class + log.Infof("(svcs) kube-vip configured to only recognize services with kube-vip's lb class but the service [%s] didn't specify any loadBalancer class, ignoring", svc.Name) + break + } + + // Check if we ignore this service + if svc.Annotations["kube-vip.io/ignore"] == "true" { + log.Infof("(svcs) [%s] has an ignore annotation for kube-vip", svc.Name) + break + } + + // The modified event should only be triggered if the service has been modified (i.e. moved somewhere else) + if event.Type == watch.Modified { + //log.Debugf("(svcs) Retreiving local addresses, to ensure that this modified address doesn't exist") + f, err := vip.GarbageCollect(sm.config.Interface, svc.Spec.LoadBalancerIP) + if err != nil { + log.Errorf("(svcs) cleaning existing address error: [%s]", err.Error()) + } + if f { + log.Warnf("(svcs) already found existing address [%s] on adapter [%s]", svc.Spec.LoadBalancerIP, sm.config.Interface) + } + } + // Scenarios: + // 1. 
+ if !activeService[string(svc.UID)] { + log.Debugf("(svcs) [%s] has been added/modified with addresses [%s]", svc.Name, fetchServiceAddress(svc)) + + wg.Add(1) + activeServiceLoadBalancer[string(svc.UID)], activeServiceLoadBalancerCancel[string(svc.UID)] = context.WithCancel(context.TODO()) + // Background the services election + if sm.config.EnableServicesElection { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + // Start an endpoint watcher if we're not watching it already + if !watchedService[string(svc.UID)] { + // background the endpoint watcher + go func() { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + // Add Endpoint watcher + wg.Add(1) + err = sm.watchEndpoint(activeServiceLoadBalancer[string(svc.UID)], id, svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + } + }() + // We're now watching this service + watchedService[string(svc.UID)] = true + } + } else { + // Increment the waitGroup before the service Func is called (Done is completed in there) + wg.Add(1) + go func() { + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + }() + } + activeService[string(svc.UID)] = true + } else { + // Increment the waitGroup before the service Func is called (Done is completed in there) + wg.Add(1) + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + } + } + case watch.Deleted: + svc, ok := event.Object.(*v1.Service) + if !ok { + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + if activeService[string(svc.UID)] { + + // We only care about LoadBalancer services + if svc.Spec.Type != v1.ServiceTypeLoadBalancer { + break + } + + // We can ignore this service + if svc.Annotations["kube-vip.io/ignore"] == "true" { + log.Infof("(svcs) [%s] has an ignore annotation for kube-vip", svc.Name) + break + } + // If this is an active service then and additional leaderElection will handle stopping + err := sm.deleteService(string(svc.UID)) + if err != nil { + log.Error(err) + } + + // Calls the cancel function of the context + if activeServiceLoadBalancerCancel[string(svc.UID)] != nil { + + activeServiceLoadBalancerCancel[string(svc.UID)]() + } + activeService[string(svc.UID)] = false + watchedService[string(svc.UID)] = false + } + log.Infof("(svcs) [%s/%s] has been deleted", svc.Namespace, svc.Name) + case watch.Bookmark: + // Un-used + case watch.Error: + log.Error("Error attempting to watch Kubernetes services") + + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + } + + status := statusErr.ErrStatus + log.Errorf("services -> %v", status) + default: + } + } + close(exitFunction) + log.Warnln("Stopping watching services for type: LoadBalancer in all namespaces") + return nil +} diff --git a/pkg/manager/watcher_test.go b/pkg/manager/watcher_test.go index 7b12383f..99149f2d 100644 --- a/pkg/manager/watcher_test.go +++ b/pkg/manager/watcher_test.go @@ -7,7 +7,6 @@ import ( "github.com/kube-vip/kube-vip/pkg/bgp" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -59,9 +58,50 @@ func TestParseBgpAnnotations(t *testing.T) { assert.Equal(t, 
"password", bgpPeer.Password, "bgpPeer.Password parsed incorrectly") } +// Node, or local, ASN, default annotation metal.equinix.com/bgp-peers-{{n}}-node-asn +// Peer ASN, default annotation metal.equinix.com/bgp-peers-{{n}}-peer-asn +// Peer IP, default annotation metal.equinix.com/bgp-peers-{{n}}-peer-ip +// Source IP to use when communicating with peer, default annotation metal.equinix.com/bgp-peers-{{n}}-src-ip +// BGP password for peer, default annotation metal.equinix.com/bgp-peers-{{n}}-bgp-pass + +func TestParseNewBgpAnnotations(t *testing.T) { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Annotations: map[string]string{}}, + } + + _, _, err := parseBgpAnnotations(node, "bgp") + if err == nil { + t.Fatal("Parsing BGP annotations should return an error when no annotations exist") + } + + node.Annotations = map[string]string{ + "bgp/bgp-peers-0-node-asn": "65000", + "bgp/bgp-peers-0-peer-asn": "64000", + "bgp/bgp-peers-0-peer-ip": "10.0.0.1,10.0.0.2,10.0.0.3", + "bgp/bgp-peers-0-src-ip": "10.0.0.254", + "bgp/bgp-peers-0-bgp-pass": "cGFzc3dvcmQ=", // password + } + + bgpConfig, bgpPeer, err := parseBgpAnnotations(node, "bgp") + if err != nil { + t.Fatalf("Parsing BGP annotations should return nil when minimum config is met [%v]", err) + } + + bgpPeers := []bgp.Peer{ + {Address: "10.0.0.1", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.2", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.3", AS: uint32(64000), Password: "password"}, + } + assert.Equal(t, bgpPeers, bgpConfig.Peers, "bgpConfig.Peers parsed incorrectly") + assert.Equal(t, "10.0.0.254", bgpConfig.SourceIP, "bgpConfig.SourceIP parsed incorrectly") + assert.Equal(t, "10.0.0.254", bgpConfig.RouterID, "bgpConfig.RouterID parsed incorrectly") + assert.Equal(t, "10.0.0.3", bgpPeer.Address, "bgpPeer.Address parsed incorrectly") + assert.Equal(t, "password", bgpPeer.Password, "bgpPeer.Password parsed incorrectly") +} + func Test_parseBgpAnnotations(t *testing.T) { type args struct { - node *v1.Node + node *corev1.Node prefix string } tests := []struct { diff --git a/pkg/service/manager.go b/pkg/service/manager.go deleted file mode 100644 index 69a448bd..00000000 --- a/pkg/service/manager.go +++ /dev/null @@ -1,106 +0,0 @@ -package service - -import ( - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/kamhlos/upnp" - "github.com/kube-vip/kube-vip/pkg/bgp" - "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/kubevip" - "github.com/kube-vip/kube-vip/pkg/vip" - "github.com/prometheus/client_golang/prometheus" - log "github.com/sirupsen/logrus" - "k8s.io/client-go/kubernetes" -) - -const plunderLock = "plunder-lock" - -var signalChan chan os.Signal - -// Manager defines the manager of the load-balancing services -type Manager struct { - clientSet *kubernetes.Clientset - configMap string - config *kubevip.Config - - // Keeps track of all running instances - serviceInstances []Instance - - // Additional functionality - upnp *upnp.Upnp - - //BGP Manager, this is a singleton that manages all BGP advertisements - bgpServer *bgp.Server - - // This channel is used to signal a shutdown - signalChan chan os.Signal - - // This is a prometheus counter used to count the number of events received - // from the service watcher - countServiceWatchEvent *prometheus.CounterVec -} - -// Instance defines an instance of everything needed to manage a vip -type Instance struct { - // Virtual IP / Load Balancer configuration - vipConfig kubevip.Config - - // cluster 
instance - cluster cluster.Cluster - - // Service uses DHCP - isDHCP bool - dhcpInterface string - dhcpInterfaceHwaddr string - dhcpInterfaceIP string - dhcpClient *vip.DHCPClient - - // Kubernetes service mapping - Vip string - Port int32 - UID string - Type string - - ServiceName string -} - -// NewManager will create a new managing object -func NewManager(configMap string, config *kubevip.Config, clientset *kubernetes.Clientset) (*Manager, error) { - return &Manager{ - clientSet: clientset, - configMap: configMap, - config: config, - }, nil -} - -// Start will begin the Manager, which will start services and watch the configmap -func (sm *Manager) Start() error { - - // If BGP is enabled then we start a server instance that will broadcast VIPs - if sm.config.EnableBGP { - log.Infoln("Starting loadBalancer Service with the BGP engine") - return sm.startBGP() - } - - // If ARP is enabled then we start a LeaderElection that will use ARP to advertise VIPs - if sm.config.EnableARP { - log.Infoln("Starting loadBalancer Service with the ARP engine") - return sm.startARP() - } - - log.Infoln("Prematurely exiting Load-balancer as neither Layer2 or Layer3 is enabled") - return nil -} - -func returnNameSpace() (string, error) { - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, nil - } - return "", err - } - return "", fmt.Errorf("Unable to find Namespace") -} diff --git a/pkg/service/manager_arp.go b/pkg/service/manager_arp.go deleted file mode 100644 index 3705cc21..00000000 --- a/pkg/service/manager_arp.go +++ /dev/null @@ -1,142 +0,0 @@ -package service - -import ( - "context" - "os" - "os/signal" - "strconv" - "syscall" - "time" - - "github.com/kamhlos/upnp" - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/leaderelection" - "k8s.io/client-go/tools/leaderelection/resourcelock" -) - -// Start will begin the Manager, which will start services and watch the configmap -func (sm *Manager) startARP() error { - ns, err := returnNameSpace() - if err != nil { - return err - } - - id, err := os.Hostname() - if err != nil { - return err - } - - // Before starting the leader Election enable any additional functionality - upnpEnabled, _ := strconv.ParseBool(os.Getenv("enableUPNP")) - - if upnpEnabled { - sm.upnp = new(upnp.Upnp) - err := sm.upnp.ExternalIPAddr() - if err != nil { - log.Errorf("Error Enabling UPNP %s", err.Error()) - // Set the struct to nil so nothing should use it in future - sm.upnp = nil - } else { - log.Infof("Successfully enabled UPNP, Gateway address [%s]", sm.upnp.GatewayOutsideIP) - } - } - - // // If BGP is enabled then we start the server that will broadcast VIPs - // if sm.config.EnableBGP { - // // Lets start BGP - // log.Info("Starting the BGP server to advertise VIP routes to VGP peers") - // sm.bgpServer, err = bgp.NewBGPServer(&sm.config.BGPConfig) - // if err != nil { - // log.Error(err) - // } - // } - - // // Defer a function to check if the bgpServer has been created and if so attempt to close it - // defer func() { - // if sm.bgpServer != nil { - // sm.bgpServer.Close() - // } - // }() - - log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) - // we use the Lease lock type since edits to Leases are less common - // and fewer objects in the cluster watch "all Leases". 
- lock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Name: plunderLock, - Namespace: ns, - }, - Client: sm.clientSet.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - }, - } - - // use a Go context so we can tell the leaderelection code when we - // want to step down - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // listen for interrupts or the Linux SIGTERM signal and cancel - // our context, which the leader election code will observe and - // step down - signalChan := make(chan os.Signal, 1) - // Add Notification for Userland interrupt - signal.Notify(signalChan, syscall.SIGINT) - - // Add Notification for SIGTERM (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGTERM) - - // Add Notification for SIGKILL (sent from Kubernetes) - //nolint - signal.Notify(signalChan, syscall.SIGKILL) - go func() { - <-signalChan - log.Info("Received termination, signaling shutdown") - // Cancel the context, which will in turn cancel the leadership - cancel() - }() - - // start the leader election code loop - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - // IMPORTANT: you MUST ensure that any code you have that - // is protected by the lease must terminate **before** - // you call cancel. Otherwise, you could have a background - // loop still running and another process could - // get elected before your background loop finished, violating - // the stated goal of the lease. - ReleaseOnCancel: true, - LeaseDuration: 10 * time.Second, - RenewDeadline: 5 * time.Second, - RetryPeriod: 1 * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(ctx context.Context) { - if err := sm.servicesWatcher(ctx); err != nil { - log.Fatalf("error starting services watcher: %v", err) - } - }, - OnStoppedLeading: func() { - // we can do cleanup here - log.Infof("leader lost: %s", id) - for x := range sm.serviceInstances { - sm.serviceInstances[x].cluster.Stop() - } - }, - OnNewLeader: func(identity string) { - // we're notified when new leader elected - if identity == id { - // I just got the lock - return - } - log.Infof("new leader elected: %s", identity) - }, - }, - }) - - //<-signalChan - log.Infof("Shutting down Kube-Vip") - - return nil -} diff --git a/pkg/service/manager_bgp.go b/pkg/service/manager_bgp.go deleted file mode 100644 index 11d017e1..00000000 --- a/pkg/service/manager_bgp.go +++ /dev/null @@ -1,82 +0,0 @@ -package service - -import ( - "context" - "os" - "os/signal" - "syscall" - - "github.com/kube-vip/kube-vip/pkg/bgp" - "github.com/kube-vip/kube-vip/pkg/packet" - "github.com/packethost/packngo" - log "github.com/sirupsen/logrus" -) - -// Start will begin the Manager, which will start services and watch the configmap -func (sm *Manager) startBGP() error { - - // If Packet is enabled then we can begin our preparation work - var packetClient *packngo.Client - var err error - if sm.config.EnableMetal { - packetClient, err = packngo.NewClient() - if err != nil { - log.Error(err) - } - - // We're using Packet with BGP, popuplate the Peer information from the API - if sm.config.EnableBGP { - log.Infoln("Looking up the BGP configuration from packet") - err = packet.BGPLookup(packetClient, sm.config) - if err != nil { - log.Error(err) - } - } - } - - log.Info("Starting the BGP server to advertise VIP routes to VGP peers") - sm.bgpServer, err = bgp.NewBGPServer(&sm.config.BGPConfig) - if err != nil { - return err - } - - // Defer a function to check if the 
bgpServer has been created and if so attempt to close it - defer func() { - if sm.bgpServer != nil { - sm.bgpServer.Close() - } - }() - - // use a Go context so we can tell the leaderelection code when we - // want to step down - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // listen for interrupts or the Linux SIGTERM signal and cancel - // our context, which the leader election code will observe and - // step down - signalChan = make(chan os.Signal, 1) - // Add Notification for Userland interrupt - signal.Notify(signalChan, syscall.SIGINT) - - // Add Notification for SIGTERM (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGTERM) - - // Add Notification for SIGKILL (sent from Kubernetes) - //nolint - signal.Notify(signalChan, syscall.SIGKILL) - go func() { - <-signalChan - log.Info("Received termination, signaling shutdown") - // Cancel the context, which will in turn cancel the leadership - cancel() - }() - - if err := sm.servicesWatcher(ctx); err != nil { - log.Fatalf("error starting services watcher: %v", err) - } - - log.Infof("Shutting down Kube-Vip") - - return nil -} diff --git a/pkg/service/prom.go b/pkg/service/prom.go deleted file mode 100644 index 2b316691..00000000 --- a/pkg/service/prom.go +++ /dev/null @@ -1,8 +0,0 @@ -package service - -import "github.com/prometheus/client_golang/prometheus" - -//PrometheusCollector - required for statistics // TODO - improve monitoring -func (sm *Manager) PrometheusCollector() []prometheus.Collector { - return []prometheus.Collector{sm.countServiceWatchEvent} -} diff --git a/pkg/service/services.go b/pkg/service/services.go deleted file mode 100644 index 499d6df6..00000000 --- a/pkg/service/services.go +++ /dev/null @@ -1,183 +0,0 @@ -package service - -import ( - "context" - "fmt" - "strings" - - "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - hwAddrKey = "kube-vip.io/hwaddr" - requestedIP = "kube-vip.io/requestedIP" -) - -func (sm *Manager) stopService(uid string) error { - found := false - for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == uid { - found = true - sm.serviceInstances[x].cluster.Stop() - } - } - if !found { - return fmt.Errorf("unable to find/stop service [%s]", uid) - } - return nil -} - -func (sm *Manager) deleteService(uid string) error { - var updatedInstances []Instance - found := false - for x := range sm.serviceInstances { - // Add the running services to the new array - if sm.serviceInstances[x].UID != uid { - updatedInstances = append(updatedInstances, sm.serviceInstances[x]) - } else { - // Flip the found when we match - found = true - if sm.serviceInstances[x].isDHCP { - macvlan, err := netlink.LinkByName(sm.serviceInstances[x].dhcpInterface) - if err != nil { - return fmt.Errorf("error finding VIP Interface, for deleting DHCP Link : %v", err) - } - if err := netlink.LinkDel(macvlan); err != nil { - return fmt.Errorf("error deleing link: %v", err) - } - } - if sm.serviceInstances[x].vipConfig.EnableBGP { - cidrVip := fmt.Sprintf("%s/%s", sm.serviceInstances[x].vipConfig.VIP, sm.serviceInstances[x].vipConfig.VIPCIDR) - err := sm.bgpServer.DelHost(cidrVip) - return err - } - } - } - // If we've been through all services and not found the correct one then error - if !found { - return fmt.Errorf("unable to find/stop service [%s]", uid) - } - - // Update 
the service array - sm.serviceInstances = updatedInstances - - log.Infof("Removed [%s] from manager, [%d] advertised services remain", uid, len(sm.serviceInstances)) - - return nil -} - -func (sm *Manager) syncServices(service *v1.Service) error { - log.Debugf("[STARTING] Service Sync") - // Iterate through the synchronising services - foundInstance := false - newServiceAddress := service.Spec.LoadBalancerIP - newServiceUID := string(service.UID) - - for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == newServiceUID { - // We have found this instance in the manager, we can determine if it needs updating - foundInstance = true - } - - } - - // Generate new Virtual IP configuration - newVip := kubevip.Config{ - VIP: newServiceAddress, //TODO support more than one vip? - Interface: sm.config.Interface, - SingleNode: true, - EnableARP: sm.config.EnableARP, - EnableBGP: sm.config.EnableBGP, - VIPCIDR: sm.config.VIPCIDR, - } - - // This instance wasn't found, we need to add it to the manager - if !foundInstance { - // Create new service - var newService Instance - newService.UID = newServiceUID - newService.Vip = newServiceAddress - newService.Type = string(service.Spec.Ports[0].Protocol) //TODO - support multiple port types - newService.Port = service.Spec.Ports[0].Port - newService.ServiceName = service.Name - newService.dhcpInterfaceHwaddr = service.Annotations[hwAddrKey] - newService.dhcpInterfaceIP = service.Annotations[requestedIP] - - // If this was purposely created with the address 0.0.0.0 then we will create a macvlan on the main interface and try DHCP - if newServiceAddress == "0.0.0.0" { - err := sm.createDHCPService(newServiceUID, &newVip, &newService, service) - if err != nil { - return err - } - return nil - } - - log.Infof("New VIP [%s] for [%s/%s] ", newService.Vip, newService.ServiceName, newService.UID) - - // Generate Load Balancer config - newLB := kubevip.LoadBalancer{ - Name: fmt.Sprintf("%s-load-balancer", newService.ServiceName), - Port: int(newService.Port), - Type: newService.Type, - BindToVip: true, - } - - // Add Load Balancer Configuration - newVip.LoadBalancers = append(newVip.LoadBalancers, newLB) - - // Create Add configuration to the new service - newService.vipConfig = newVip - - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", newService.ServiceName, newService.UID) - return err - } - err = c.StartLoadBalancerService(&newService.vipConfig, sm.bgpServer) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", newService.ServiceName, newService.UID) - return err - } - - sm.upnpMap(newService) - - newService.cluster = *c - - // Begin watching this service - // TODO - we may need this - // go sm.serviceWatcher(&newService, sm.config.Namespace) - - // Update the "Status" of the LoadBalancer (one or many may do this), as long as one does it - service.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: newVip.VIP}} - _, err = sm.clientSet.CoreV1().Services(service.Namespace).UpdateStatus(context.TODO(), service, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service [%s] Status: %v", newService.ServiceName, err) - } - sm.serviceInstances = append(sm.serviceInstances, newService) - } - - log.Debugf("[COMPLETE] Service Sync") - - return nil -} - -func (sm *Manager) upnpMap(s Instance) { - // If upnp is enabled then update the gateway/router with the address - // TODO - work out if we need to 
mapping.Reclaim() - if sm.upnp != nil { - - log.Infof("[UPNP] Adding map to [%s:%d - %s]", s.Vip, s.Port, s.ServiceName) - if err := sm.upnp.AddPortMapping(int(s.Port), int(s.Port), 0, s.Vip, strings.ToUpper(s.Type), s.ServiceName); err == nil { - log.Infof("Service should be accessible externally on port [%d]", s.Port) - } else { - sm.upnp.Reclaim() - log.Errorf("Unable to map port to gateway [%s]", err.Error()) - } - } -} diff --git a/pkg/service/services_dhcp.go b/pkg/service/services_dhcp.go deleted file mode 100644 index dc8a6ed1..00000000 --- a/pkg/service/services_dhcp.go +++ /dev/null @@ -1,146 +0,0 @@ -package service - -import ( - "context" - "fmt" - "net" - - "github.com/insomniacslk/dhcp/dhcpv4/nclient4" - log "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/util/retry" - - "github.com/kube-vip/kube-vip/pkg/cluster" - "github.com/kube-vip/kube-vip/pkg/kubevip" - "github.com/kube-vip/kube-vip/pkg/vip" -) - -func (sm *Manager) createDHCPService(newServiceUID string, newVip *kubevip.Config, newService *Instance, service *v1.Service) error { - parent, err := netlink.LinkByName(sm.config.Interface) - if err != nil { - return fmt.Errorf("Error finding VIP Interface, for building DHCP Link : %v", err) - } - - // Create macvlan - - // Generate name from UID - interfaceName := fmt.Sprintf("vip-%s", newServiceUID[0:8]) - - // Check if the interface doesn't exist first - iface, err := net.InterfaceByName(interfaceName) - if err != nil { - log.Infof("Creating new macvlan interface for DHCP [%s]", interfaceName) - - hwaddr, err := net.ParseMAC(newService.dhcpInterfaceHwaddr) - if newService.dhcpInterfaceHwaddr != "" && err != nil { - return err - } - - mac := &netlink.Macvlan{ - LinkAttrs: netlink.LinkAttrs{ - Name: interfaceName, - ParentIndex: parent.Attrs().Index, - HardwareAddr: hwaddr, - }, - Mode: netlink.MACVLAN_MODE_DEFAULT, - } - - err = netlink.LinkAdd(mac) - if err != nil { - return fmt.Errorf("Could not add %s: %v", interfaceName, err) - } - - err = netlink.LinkSetUp(mac) - if err != nil { - return fmt.Errorf("Could not bring up interface [%s] : %v", interfaceName, err) - } - iface, err = net.InterfaceByName(interfaceName) - if err != nil { - return fmt.Errorf("Error finding new DHCP interface by name [%v]", err) - } - } else { - log.Infof("Using existing macvlan interface for DHCP [%s]", interfaceName) - } - - var initRebootFlag bool - if newService.dhcpInterfaceHwaddr != "" { - initRebootFlag = true - } - - client := vip.NewDHCPClient(iface, initRebootFlag, newService.dhcpInterfaceIP, func(lease *nclient4.Lease) { - newVip.VIP = lease.ACK.YourIPAddr.String() - - log.Infof("DHCP VIP [%s] for [%s/%s] ", newVip.VIP, newService.ServiceName, newServiceUID) - - // Create Add configuration to the new service - newService.vipConfig = *newVip - - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]: %v", newService.ServiceName, newService.UID, err) - return - } - err = c.StartLoadBalancerService(&newService.vipConfig, sm.bgpServer) - if err != nil { - log.Errorf("Failed to add Load Balancer service Service [%s] / [%s]: %v", newService.ServiceName, newService.UID, err) - return - } - newService.cluster = *c - - retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Retrieve the latest version of Deployment before attempting update - // RetryOnConflict uses exponential 
backoff to avoid exhausting the apiserver - currentService, err := sm.clientSet.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - currentServiceCopy := currentService.DeepCopy() - if currentServiceCopy.Annotations == nil { - currentServiceCopy.Annotations = make(map[string]string) - } - currentServiceCopy.Annotations[hwAddrKey] = iface.HardwareAddr.String() - currentServiceCopy.Annotations[requestedIP] = newVip.VIP - updatedService, err := sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service Spec [%s] : %v", newService.ServiceName, err) - return err - } - - updatedService.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{IP: newVip.VIP}} - _, err = sm.clientSet.CoreV1().Services(updatedService.Namespace).UpdateStatus(context.TODO(), updatedService, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service [%s] Status: %v", newService.ServiceName, err) - return err - } - return nil - }) - - if retryErr != nil { - log.Errorf("Failed to set Services: %v", retryErr) - } - // Find an update our array - - for x := range sm.serviceInstances { - if sm.serviceInstances[x].UID == newServiceUID { - sm.serviceInstances[x] = *newService - } - } - sm.upnpMap(*newService) - }) - // Set that DHCP is enabled - newService.isDHCP = true - // Set the name of the interface so that it can be removed on Service deletion - newService.dhcpInterface = interfaceName - // Add the client so that we can call it's stop function - newService.dhcpClient = client - - sm.serviceInstances = append(sm.serviceInstances, *newService) - - go client.Start() - - return nil -} diff --git a/pkg/service/watcher.go b/pkg/service/watcher.go deleted file mode 100644 index bbea8af4..00000000 --- a/pkg/service/watcher.go +++ /dev/null @@ -1,94 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - watchtools "k8s.io/client-go/tools/watch" -) - -// This file handles the watching of a services endpoints and updates a load balancers endpoint configurations accordingly -func (sm *Manager) servicesWatcher(ctx context.Context) error { - // Watch function - - // Use a restartable watcher, as this should help in the event of etcd or timeout issues - rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return sm.clientSet.CoreV1().Services(v1.NamespaceAll).Watch(ctx, metav1.ListOptions{}) - }, - }) - if err != nil { - return fmt.Errorf("error creating services watcher: %s", err.Error()) - } - go func() { - <-sm.signalChan - // Cancel the context - rw.Stop() - }() - ch := rw.ResultChan() - //defer rw.Stop() - log.Infoln("Beginning watching services for type: LoadBalancer in all namespaces") - - for event := range ch { - //sm.countServiceWatchEvent.With(prometheus.Labels{"type": string(event.Type)}).Add(1) - - // We need to inspect the event and get ResourceVersion out of it - switch event.Type { - case watch.Added, watch.Modified: - // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) - svc, ok := 
event.Object.(*v1.Service) - if !ok { - return fmt.Errorf("Unable to parse Kubernetes services from API watcher") - } - if svc.Spec.LoadBalancerIP == "" { - log.Infof("Service [%s] has been addded/modified, it has no assigned external addresses", svc.Name) - } else { - log.Infof("Service [%s] has been addded/modified, it has an assigned external addresses [%s]", svc.Name, svc.Spec.LoadBalancerIP) - err = sm.syncServices(svc) - if err != nil { - log.Error(err) - } - } - case watch.Deleted: - svc, ok := event.Object.(*v1.Service) - if !ok { - return fmt.Errorf("Unable to parse Kubernetes services from API watcher") - } - err = sm.stopService(string(svc.UID)) - if err != nil { - log.Error(err) - } - err = sm.deleteService(string(svc.UID)) - if err != nil { - log.Error(err) - } - log.Infof("Service [%s] has been deleted", svc.Name) - - case watch.Bookmark: - // Un-used - case watch.Error: - log.Error("Error attempting to watch Kubernetes services") - - // This round trip allows us to handle unstructured status - errObject := apierrors.FromObject(event.Object) - statusErr, ok := errObject.(*apierrors.StatusError) - if !ok { - log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) - - } - - status := statusErr.ErrStatus - log.Errorf("%v", status) - default: - } - } - log.Warnln("Stopping watching services for type: LoadBalancer in all namespaces") - return nil -} diff --git a/pkg/vip/address.go b/pkg/vip/address.go index 2a3eb814..086b76a3 100644 --- a/pkg/vip/address.go +++ b/pkg/vip/address.go @@ -1,25 +1,39 @@ package vip import ( + "fmt" + "os" + "strconv" + "strings" "sync" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" "golang.org/x/sys/unix" + v1 "k8s.io/api/core/v1" + + "github.com/kube-vip/kube-vip/pkg/iptables" ) const ( - defaultValidLft = 60 + defaultValidLft = 60 + iptablesComment = "%s kube-vip load balancer IP" + ignoreServiceSecurityAnnotation = "kube-vip.io/ignore-service-security" ) // Network is an interface that enable managing operations for a given IP type Network interface { AddIP() error + AddRoute() error DeleteIP() error + DeleteRoute() error IsSet() (bool, error) IP() string SetIP(ip string) error + SetServicePorts(service *v1.Service) Interface() string + IsDADFAIL() bool IsDNS() bool IsDDNS() bool DDNSHostName() string @@ -30,11 +44,17 @@ type Network interface { type network struct { mu sync.Mutex - address *netlink.Addr - link netlink.Link + address *netlink.Addr + link netlink.Link + ports []v1.ServicePort + serviceName string + ignoreSecurity bool dnsName string isDDNS bool + + routeTable int + routingTableType int } func netlinkParse(addr string) (*netlink.Addr, error) { @@ -46,19 +66,30 @@ func netlinkParse(addr string) (*netlink.Addr, error) { } // NewConfig will attempt to provide an interface to the kernel network configuration -func NewConfig(address string, iface string, isDDNS bool) (Network, error) { +func NewConfig(address string, iface string, subnet string, isDDNS bool, tableID int, tableType int) (Network, error) { result := &network{} link, err := netlink.LinkByName(iface) if err != nil { return result, errors.Wrapf(err, "could not get link for interface '%s'", iface) } + result.link = link + result.routeTable = tableID + result.routingTableType = tableType if IsIP(address) { - result.address, err = netlinkParse(address) - if err != nil { - return result, errors.Wrapf(err, "could not parse address '%s'", address) + // Check if the subnet needs overriding + if 
subnet != "" { + result.address, err = netlink.ParseAddr(address + subnet) + if err != nil { + return result, errors.Wrapf(err, "could not parse address '%s'", address) + } + } else { + result.address, err = netlinkParse(address) + if err != nil { + return result, errors.Wrapf(err, "could not parse address '%s'", address) + } } // Ensure we don't have a global address on loopback if iface == "lo" { @@ -70,7 +101,6 @@ func NewConfig(address string, iface string, isDDNS bool) (Network, error) { // address is DNS result.isDDNS = isDDNS result.dnsName = address - // try to resolve the address ip, err := lookupHost(address) if err != nil { @@ -92,11 +122,178 @@ func NewConfig(address string, iface string, isDDNS bool) (Network, error) { return result, err } +// AddRoute - Add an IP address to a route table +func (configurator *network) AddRoute() error { + routeScope := netlink.SCOPE_UNIVERSE + if configurator.routingTableType == unix.RTN_LOCAL { + routeScope = netlink.SCOPE_LINK + } + route := &netlink.Route{ + Scope: routeScope, + Dst: configurator.address.IPNet, + LinkIndex: configurator.link.Attrs().Index, + Table: configurator.routeTable, + Type: configurator.routingTableType, + } + return netlink.RouteAdd(route) +} + +// DeleteRoute - Delete an IP address from a route table +func (configurator *network) DeleteRoute() error { + routeScope := netlink.SCOPE_UNIVERSE + if configurator.routingTableType == unix.RTN_LOCAL { + routeScope = netlink.SCOPE_LINK + } + route := &netlink.Route{ + Scope: routeScope, + Dst: configurator.address.IPNet, + LinkIndex: configurator.link.Attrs().Index, + Table: configurator.routeTable, + Type: configurator.routingTableType, + } + return netlink.RouteDel(route) +} + // AddIP - Add an IP address to the interface func (configurator *network) AddIP() error { if err := netlink.AddrReplace(configurator.link, configurator.address); err != nil { return errors.Wrap(err, "could not add ip") } + + if os.Getenv("enable_service_security") == "true" && !configurator.ignoreSecurity { + if err := configurator.addIptablesRulesToLimitTrafficPorts(); err != nil { + return errors.Wrap(err, "could not add iptables rules to limit traffic ports") + } + } + + return nil +} + +func (configurator *network) addIptablesRulesToLimitTrafficPorts() error { + ipt, err := iptables.New() + if err != nil { + return errors.Wrap(err, "could not create iptables client") + } + + vip := configurator.address.IP.String() + comment := fmt.Sprintf(iptablesComment, configurator.serviceName) + if err := insertCommonIPTablesRules(ipt, vip, comment); err != nil { + return fmt.Errorf("could not add common iptables rules: %w", err) + } + log.Debugf("add iptables rules, vip: %s, ports: %+v", vip, configurator.ports) + if err := configurator.insertIPTablesRulesForServicePorts(ipt, vip, comment); err != nil { + return fmt.Errorf("could not add iptables rules for service ports: %v", err) + } + + return nil +} + +func (configurator *network) insertIPTablesRulesForServicePorts(ipt *iptables.IPTables, vip, comment string) error { + isPortsRuleExisting := make([]bool, len(configurator.ports)) + + // delete rules of ports that are not in the service + rules, err := ipt.List(iptables.TableFilter, iptables.ChainInput) + if err != nil { + return fmt.Errorf("could not list iptables rules: %w", err) + } + for _, rule := range rules { + // only handle rules with kube-vip comment + if iptables.GetIPTablesRuleSpecification(rule, "--comment") != comment { + continue + } + // if the rule is not for the vip, delete it + if 
iptables.GetIPTablesRuleSpecification(rule, "-d") != vip { + if err := ipt.Delete(iptables.TableFilter, iptables.ChainInput, rule); err != nil { + return fmt.Errorf("could not delete iptables rule: %w", err) + } + } + + protocol := iptables.GetIPTablesRuleSpecification(rule, "-p") + port := iptables.GetIPTablesRuleSpecification(rule, "--dport") + // ignore DHCP client port + if protocol == string(v1.ProtocolUDP) && port == dhcpClientPort { + continue + } + // if the rule is for the vip, but its protocol and port are not in the service, delete it + toBeDeleted := true + for i, p := range configurator.ports { + if string(p.Protocol) == protocol && strconv.Itoa(int(p.Port)) == port { + // the rule is for the vip and its protocol and port are in the service, keep it and mark it as existing + toBeDeleted = false + isPortsRuleExisting[i] = true + } + } + if toBeDeleted { + if err := ipt.Delete(iptables.TableFilter, iptables.ChainInput, strings.Split(rule, " ")...); err != nil { + return fmt.Errorf("could not delete iptables rule: %w", err) + } + } + } + // add rules of ports that are not existing + // iptables -A INPUT -d <vip> -p <protocol> --dport <port> -j ACCEPT -m comment --comment "<serviceName> kube-vip load balancer IP" + for i, ok := range isPortsRuleExisting { + if !ok { + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 1, "-d", vip, "-p", + string(configurator.ports[i].Protocol), "--dport", strconv.Itoa(int(configurator.ports[i].Port)), + "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not add iptables rule to accept the traffic to VIP %s for allowed "+ + "port %d: %v", vip, configurator.ports[i].Port, err) + } + } + } + + return nil +} + +func insertCommonIPTablesRules(ipt *iptables.IPTables, vip, comment string) error { + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 1, "-d", vip, "-p", + string(v1.ProtocolUDP), "--dport", dhcpClientPort, "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not add iptables rule to accept the traffic to VIP %s for DHCP client port: %w", vip, err) + } + // add rule to drop the traffic to VIP that is not allowed + // iptables -A INPUT -d -j DROP + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 2, "-d", vip, "-m", + "comment", "--comment", comment, "-j", "DROP"); err != nil { + return fmt.Errorf("could not add iptables rule to drop the traffic to VIP %s: %v", vip, err) + } + return nil +} + +func deleteCommonIPTablesRules(ipt *iptables.IPTables, vip, comment string) error { + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-p", + string(v1.ProtocolUDP), "--dport", dhcpClientPort, "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not delete iptables rule to accept the traffic to VIP %s for DHCP client port: %w", vip, err) + } + // delete the rule that drops the traffic to VIP that is not allowed + // iptables -A INPUT -d -j DROP + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-m", "comment", + "--comment", comment, "-j", "DROP"); err != nil { + return fmt.Errorf("could not delete iptables rule to drop the traffic to VIP %s: %v", vip, err) + } + return nil +} + +func (configurator *network) removeIptablesRuleToLimitTrafficPorts() error { + ipt, err := iptables.New() + if err != nil { + return errors.Wrap(err, "could not create iptables client") + } + vip := configurator.address.IP.String() + comment := 
fmt.Sprintf(iptablesComment, configurator.serviceName) + + if err := deleteCommonIPTablesRules(ipt, vip, comment); err != nil { + return fmt.Errorf("could not delete common iptables rules: %w", err) + } + + log.Debugf("remove iptables rules, vip: %s, ports: %+v", vip, configurator.ports) + for _, port := range configurator.ports { + // iptables -D INPUT -d -p --dport -j ACCEPT + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-p", string(port.Protocol), + "--dport", strconv.Itoa(int(port.Port)), "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not delete iptables rule to accept the traffic to VIP %s for allowed port %d: %v", vip, port.Port, err) + } + } + return nil } @@ -116,9 +313,41 @@ func (configurator *network) DeleteIP() error { return errors.Wrap(err, "could not delete ip") } + if os.Getenv("enable_service_security") == "true" && !configurator.ignoreSecurity { + if err := configurator.removeIptablesRuleToLimitTrafficPorts(); err != nil { + return errors.Wrap(err, "could not remove iptables rules to limit traffic ports") + } + } + return nil } +// IsDADFAIL - Returns true if the address is IPv6 and has DADFAILED flag +func (configurator *network) IsDADFAIL() bool { + if configurator.address == nil || !IsIPv6(configurator.address.IP.String()) { + return false + } + + // Get all the address + addresses, err := netlink.AddrList(configurator.link, netlink.FAMILY_V6) + if err != nil { + return false + } + + // Find the VIP and check if it is DADFAILED + for _, address := range addresses { + if address.IP.Equal(configurator.address.IP) && addressHasDADFAILEDFlag(address) { + return true + } + } + + return false +} + +func addressHasDADFAILEDFlag(address netlink.Addr) bool { + return address.Flags&unix.IFA_F_DADFAILED != 0 +} + // IsSet - Check to see if VIP is set func (configurator *network) IsSet() (result bool, err error) { var addresses []netlink.Addr @@ -159,6 +388,17 @@ func (configurator *network) SetIP(ip string) error { return nil } +// SetServicePorts updates the service ports from the service +// If you want to limit traffic to the VIP to only the service ports, add service ports to the network firstly. 
+func (configurator *network) SetServicePorts(service *v1.Service) { + configurator.mu.Lock() + defer configurator.mu.Unlock() + + configurator.ports = service.Spec.Ports + configurator.serviceName = service.Namespace + "/" + service.Name + configurator.ignoreSecurity = service.Annotations[ignoreServiceSecurityAnnotation] == "true" +} + // IP - return the IP Address func (configurator *network) IP() string { configurator.mu.Lock() @@ -194,3 +434,32 @@ func (configurator *network) DDNSHostName() string { func (configurator *network) Interface() string { return configurator.link.Attrs().Name } + +func GarbageCollect(adapter, address string) (found bool, err error) { + + // Get adapter + link, err := netlink.LinkByName(adapter) + if err != nil { + return true, errors.Wrapf(err, "could not get link for interface '%s'", adapter) + } + + // Get addresses on adapter + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return false, err + } + + // Compare all addresses to new service address, and remove if needed + for _, existing := range addrs { + if existing.IP.String() == address { + // We've found the existing address + found = true + // linting issue + existing := existing + if err = netlink.AddrDel(link, &existing); err != nil { + return true, errors.Wrap(err, "could not delete ip") + } + } + } + return // Didn't find the address on the adapter +} diff --git a/pkg/vip/arp.go b/pkg/vip/arp.go index 6038f0a8..7a8cee2f 100644 --- a/pkg/vip/arp.go +++ b/pkg/vip/arp.go @@ -12,8 +12,6 @@ import ( "net" "syscall" "unsafe" - - log "github.com/sirupsen/logrus" ) const ( @@ -137,7 +135,7 @@ func sendARP(iface *net.Interface, m *arpMessage) error { Halen: m.hardwareAddressLength, } target := ethernetBroadcast - for i := 0; i < len(target); i++ { + for i := 0; i < len(target); i++ { //nolint ll.Addr[i] = target[i] } @@ -168,7 +166,7 @@ func ARPSendGratuitous(address, ifaceName string) error { return fmt.Errorf("failed to parse address %s", ip) } - log.Infof("Broadcasting ARP update for %s (%s) via %s", address, iface.HardwareAddr, iface.Name) + // This is a debug message, enable debugging to ensure that the gratuitous arp is repeating m, err := gratuitousARP(ip, iface.HardwareAddr) if err != nil { return err diff --git a/pkg/vip/arp_unsupported.go b/pkg/vip/arp_unsupported.go index 5d732582..c6230b53 100644 --- a/pkg/vip/arp_unsupported.go +++ b/pkg/vip/arp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package vip diff --git a/pkg/vip/ddns.go b/pkg/vip/ddns.go index 0595a92b..baea01ee 100644 --- a/pkg/vip/ddns.go +++ b/pkg/vip/ddns.go @@ -5,7 +5,6 @@ import ( "net" "time" - "github.com/insomniacslk/dhcp/dhcpv4/nclient4" "github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -39,12 +38,11 @@ func (ddns *ddnsManager) Start() (string, error) { return "", err } - // channel to wait for IP - ipCh := make(chan string) + client := NewDHCPClient(iface, false, "") - client := NewDHCPClient(iface, false, "", func(lease *nclient4.Lease) { - ipCh <- lease.ACK.YourIPAddr.String() - }) + client.WithHostName(ddns.network.DDNSHostName()) + + go client.Start() client.WithHostName(ddns.network.DDNSHostName()) @@ -60,7 +58,7 @@ func (ddns *ddnsManager) Start() (string, error) { case <-timeout: client.Stop() return "", errors.New("failed to get IP from dhcp for ddns in 1 minutes") - case ip = <-ipCh: + case ip = <-client.IPChannel(): log.Info("got ip from dhcp: ", ip) } @@ -79,7 +77,7 @@ func (ddns *ddnsManager) Start() (string, error) { log.Info("stop dhclient for 
ddns") client.Stop() return - case ip := <-ipCh: + case ip := <-client.IPChannel(): log.Info("got ip from dhcp: ", ip) } } diff --git a/pkg/vip/dhcp.go b/pkg/vip/dhcp.go index 4c1c081b..1512535d 100644 --- a/pkg/vip/dhcp.go +++ b/pkg/vip/dhcp.go @@ -14,8 +14,9 @@ import ( log "github.com/sirupsen/logrus" ) -// Callback is a function called on certain events -type Callback func(*nclient4.Lease) +const dhcpClientPort = "68" +const defaultDHCPRenew = time.Hour +const maxBackoffAttempts = 3 // DHCPClient is responsible for maintaining ipv4 lease for one specified interface type DHCPClient struct { @@ -24,18 +25,22 @@ type DHCPClient struct { lease *nclient4.Lease initRebootFlag bool requestedIP net.IP - stopChan chan struct{} - onBound Callback + stopChan chan struct{} // used as a signal to release the IP and stop the dhcp client daemon + releasedChan chan struct{} // indicate that the IP has been released + errorChan chan error // indicates there was an error on the IP request + ipChan chan string } // NewDHCPClient returns a new DHCP Client. -func NewDHCPClient(iface *net.Interface, initRebootFlag bool, requestedIP string, onBound Callback) *DHCPClient { +func NewDHCPClient(iface *net.Interface, initRebootFlag bool, requestedIP string) *DHCPClient { return &DHCPClient{ iface: iface, stopChan: make(chan struct{}), + releasedChan: make(chan struct{}), + errorChan: make(chan error), initRebootFlag: initRebootFlag, requestedIP: net.ParseIP(requestedIP), - onBound: onBound, + ipChan: make(chan string), } } @@ -46,7 +51,19 @@ func (c *DHCPClient) WithHostName(hostname string) *DHCPClient { // Stop state-transition process and close dhcp client func (c *DHCPClient) Stop() { + close(c.ipChan) close(c.stopChan) + <-c.releasedChan +} + +// Gets the IPChannel for consumption +func (c *DHCPClient) IPChannel() chan string { + return c.ipChan +} + +// Gets the ErrorChannel for consumption +func (c *DHCPClient) ErrorChannel() chan error { + return c.errorChan } // Start state-transition process of dhcp client @@ -104,189 +121,170 @@ func (c *DHCPClient) Stop() { // ---------- // Figure: State-transition diagram for DHCP clients func (c *DHCPClient) Start() { + lease := c.requestWithBackoff() + + c.initRebootFlag = false + c.lease = lease + + log.Info("DHCP lease: ", lease) + + // Set up two ticker to renew/rebind regularly + t1Timeout := c.lease.ACK.IPAddressLeaseTime(defaultDHCPRenew) / 2 + t2Timeout := (c.lease.ACK.IPAddressLeaseTime(defaultDHCPRenew) / 8) * 7 + log.Debugf("t1 %v t2 %v", t1Timeout, t2Timeout) + t1, t2 := time.NewTicker(t1Timeout), time.NewTicker(t2Timeout) + + for { + select { + case <-t1.C: + // renew is a unicast request of the IP renewal + // A point on renew is: the library does not return the right message (NAK) + // on renew error due to IP Change, but instead it returns a different error + // This way there's not much to do other than log and continue, as the renew error + // may be an offline server, or may be an incorrect package match + lease, err := c.renew() + if err == nil { + c.lease = lease + log.Infof("renew, lease: %+v", lease) + t2.Reset(t2Timeout) + } else { + log.Errorf("renew failed, error: %s", err.Error()) + } + case <-t2.C: + // rebind is just like a request, but forcing to provide a new IP address + lease, err := c.request(true) + if err == nil { + c.lease = lease + log.Infof("rebind, lease: %+v", lease) + } else { + if _, ok := err.(*nclient4.ErrNak); !ok { + t1.Stop() + t2.Stop() + log.Errorf("rebind failed, error: %s", err.Error()) + return + } + 
log.Warnf("ip %s may have changed: %s", c.lease.ACK.YourIPAddr, err.Error()) + c.initRebootFlag = false + c.lease = c.requestWithBackoff() + } + t1.Reset(t1Timeout) + t2.Reset(t2Timeout) + + case <-c.stopChan: + // release is a unicast request of the IP release. + if err := c.release(); err != nil { + log.Errorf("release lease failed, error: %s, lease: %+v", err.Error(), c.lease) + } else { + log.Infof("release, lease: %+v", c.lease) + } + t1.Stop() + t2.Stop() + + close(c.releasedChan) + return + } + } +} + +// -------------------------------------------------------- +// | |INIT-REBOOT | RENEWING |REBINDING | +// -------------------------------------------------------- +// |broad/unicast |broadcast | unicast |broadcast | +// |server-ip |MUST NOT | MUST NOT |MUST NOT | +// |requested-ip |MUST | MUST NOT |MUST NOT | +// |ciaddr |zero | IP address |IP address| +// -------------------------------------------------------- + +func (c *DHCPClient) requestWithBackoff() *nclient4.Lease { backoff := backoff.Backoff{ Factor: 2, Jitter: true, Min: 10 * time.Second, Max: 1 * time.Minute, } + + var lease *nclient4.Lease + var err error + for { - var lease *nclient4.Lease - var err error - if c.initRebootFlag { - // DHCP State-transition: INIT-REBOOT --> BOUND - lease, err = c.initReboot() - } else { - // DHCP State-transition: INIT --> BOUND - lease, err = c.request() - } + log.Debugf("trying to get a new IP, attempt %f", backoff.Attempt()) + lease, err = c.request(false) if err != nil { dur := backoff.Duration() + if backoff.Attempt() > maxBackoffAttempts-1 { + errMsg := fmt.Errorf("failed to get an IP address after %d attempts, error %s, giving up", maxBackoffAttempts, err.Error()) + log.Error(errMsg) + c.errorChan <- errMsg + c.Stop() + return nil + } log.Errorf("request failed, error: %s (waiting %v)", err.Error(), dur) time.Sleep(dur) continue - } else { - backoff.Reset() - } - - c.initRebootFlag = false - c.lease = lease - c.onBound(lease) - - log.Info("DHCP lease: %v", lease) - - // Set up two ticker to renew/rebind regularly - t1Timeout := c.lease.ACK.IPAddressLeaseTime(0) / 2 - t2Timeout := c.lease.ACK.IPAddressLeaseTime(0) / 8 * 7 - - t1, t2 := time.NewTicker(t1Timeout), time.NewTicker(t2Timeout) - - for { - select { - case <-t1.C: - // renew - lease, err := c.renew() - if err == nil { - c.lease = lease - log.Infof("renew, lease: %+v", lease) - t2.Reset(t2Timeout) - } else { - log.Errorf("renew failed, error: %s", err.Error()) - } - case <-t2.C: - // rebind - lease, err := c.rebind() - if err == nil { - c.lease = lease - log.Infof("rebind, lease: %+v", lease) - t1.Reset(t1Timeout) - } else { - log.Errorf("rebind failed, error: %s", err.Error()) - t1.Stop() - t2.Stop() - break - } - case <-c.stopChan: - // release - if err := c.release(); err != nil { - log.Errorf("release lease failed, error: %s, lease: %+v", err.Error(), c.lease) - } - log.Infof("release, lease: %+v", c.lease) - t1.Stop() - t2.Stop() - return - } } + backoff.Reset() + break } -} -func (c *DHCPClient) request() (*nclient4.Lease, error) { - broadcast, err := nclient4.New(c.iface.Name) - if err != nil { - return nil, fmt.Errorf("create a broadcast client for iface %s failed, error: %w", c.iface.Name, err) - } - - defer broadcast.Close() - - if c.ddnsHostName != "" { - return broadcast.Request(context.TODO(), - dhcpv4.WithOption(dhcpv4.OptHostName(c.ddnsHostName)), - dhcpv4.WithOption(dhcpv4.OptClientIdentifier([]byte(c.ddnsHostName)))) + if c.ipChan != nil { + log.Debugf("using channel") + c.ipChan <- 
lease.ACK.YourIPAddr.String() } - return broadcast.Request(context.TODO()) + return lease } -func (c *DHCPClient) release() error { - unicast, err := nclient4.New(c.iface.Name, nclient4.WithUnicast(&net.UDPAddr{IP: c.lease.ACK.YourIPAddr, Port: nclient4.ClientPort}), - nclient4.WithServerAddr(&net.UDPAddr{IP: c.lease.ACK.ServerIPAddr, Port: nclient4.ServerPort})) +func (c *DHCPClient) request(rebind bool) (*nclient4.Lease, error) { + dhclient, err := nclient4.New(c.iface.Name) if err != nil { - return fmt.Errorf("create unicast client failed, error: %w, server ip: %v", err, c.lease.ACK.ServerIPAddr) + return nil, fmt.Errorf("create a client for iface %s failed, error: %w", c.iface.Name, err) } - defer unicast.Close() - // TODO modify lease - return unicast.Release(c.lease) -} + defer dhclient.Close() -// -------------------------------------------------------- -// | |INIT-REBOOT | RENEWING |REBINDING | -// -------------------------------------------------------- -// |broad/unicast |broadcast | unicast |broadcast | -// |server-ip |MUST NOT | MUST NOT |MUST NOT | -// |requested-ip |MUST | MUST NOT |MUST NOT | -// |ciaddr |zero | IP address |IP address| -// -------------------------------------------------------- -func (c *DHCPClient) initReboot() (*nclient4.Lease, error) { - broadcast, err := nclient4.New(c.iface.Name) - if err != nil { - return nil, fmt.Errorf("create a broadcast client for iface %s failed, error: %w", c.iface.Name, err) - } - defer broadcast.Close() - message, err := dhcpv4.New( - dhcpv4.WithMessageType(dhcpv4.MessageTypeRequest), - dhcpv4.WithHwAddr(c.iface.HardwareAddr), - dhcpv4.WithOption(dhcpv4.OptRequestedIPAddress(c.requestedIP))) - if err != nil { - return nil, fmt.Errorf("new dhcp message failed, error: %w", err) - } + modifiers := make([]dhcpv4.Modifier, 0) - return sendMessage(broadcast, message) -} + if c.ddnsHostName != "" { + modifiers = append(modifiers, + dhcpv4.WithOption(dhcpv4.OptHostName(c.ddnsHostName)), + dhcpv4.WithOption(dhcpv4.OptClientIdentifier([]byte(c.ddnsHostName))), + ) + } -func (c *DHCPClient) renew() (*nclient4.Lease, error) { - unicast, err := nclient4.New(c.iface.Name, nclient4.WithUnicast(&net.UDPAddr{IP: c.lease.ACK.YourIPAddr, Port: nclient4.ClientPort}), - nclient4.WithServerAddr(&net.UDPAddr{IP: c.lease.ACK.ServerIPAddr, Port: nclient4.ServerPort})) - if err != nil { - return nil, fmt.Errorf("create unicast client failed, error: %w, server ip: %v", err, c.lease.ACK.ServerIPAddr) + // if initRebootFlag is set, this means we have an IP already set on c.requestedIP that should be used + if c.initRebootFlag { + log.Debugf("init-reboot ip %s", c.requestedIP) + modifiers = append(modifiers, dhcpv4.WithOption(dhcpv4.OptRequestedIPAddress(c.requestedIP))) } - defer unicast.Close() - message, err := dhcpv4.New( - dhcpv4.WithMessageType(dhcpv4.MessageTypeRequest), - dhcpv4.WithHwAddr(c.iface.HardwareAddr), - dhcpv4.WithClientIP(c.lease.ACK.ClientIPAddr)) - if err != nil { - return nil, fmt.Errorf("new dhcp message failed, error: %w", err) + // if this is a rebind, then the IP we should set is the one that already exists in lease + if rebind { + log.Debugf("rebinding ip %s", c.lease.ACK.YourIPAddr) + modifiers = append(modifiers, dhcpv4.WithOption(dhcpv4.OptRequestedIPAddress(c.lease.ACK.YourIPAddr))) } - return sendMessage(unicast, message) + return dhclient.Request(context.TODO(), modifiers...) 
} -func (c *DHCPClient) rebind() (*nclient4.Lease, error) { - broadcast, err := nclient4.New(c.iface.Name) - if err != nil { - return nil, fmt.Errorf("create a broadcast client for iface %s failed, error: %s", c.iface.Name, err) - } - defer broadcast.Close() - message, err := dhcpv4.New( - dhcpv4.WithMessageType(dhcpv4.MessageTypeRequest), - dhcpv4.WithHwAddr(c.iface.HardwareAddr), - dhcpv4.WithClientIP(c.lease.ACK.ClientIPAddr)) +func (c *DHCPClient) release() error { + dhclient, err := nclient4.New(c.iface.Name, nclient4.WithUnicast(&net.UDPAddr{IP: c.lease.ACK.YourIPAddr, Port: nclient4.ClientPort})) if err != nil { - return nil, fmt.Errorf("new dhcp message failed, error: %w", err) + return fmt.Errorf("create release client failed, error: %w, iface: %s, server ip: %v", err, c.iface.Name, c.lease.ACK.ServerIPAddr) } + defer dhclient.Close() - return sendMessage(broadcast, message) + // TODO modify lease + return dhclient.Release(c.lease) } -func sendMessage(client *nclient4.Client, message *dhcpv4.DHCPv4) (*nclient4.Lease, error) { - response, err := client.SendAndRead(context.TODO(), client.RemoteAddr(), message, - nclient4.IsMessageType(dhcpv4.MessageTypeAck, dhcpv4.MessageTypeNak)) +func (c *DHCPClient) renew() (*nclient4.Lease, error) { + // renew needs a unicast client. This is due to some servers (like dnsmasq) require the exact request coming from the vip interface + dhclient, err := nclient4.New(c.iface.Name, + nclient4.WithUnicast(&net.UDPAddr{IP: c.lease.ACK.YourIPAddr, Port: nclient4.ClientPort})) if err != nil { - return nil, fmt.Errorf("got an error while processing the request: %w", err) + return nil, fmt.Errorf("create renew client failed, error: %w, server ip: %v", err, c.lease.ACK.ServerIPAddr) } - if response.MessageType() == dhcpv4.MessageTypeNak { - return nil, &nclient4.ErrNak{ - Offer: message, - Nak: response, - } - } - - lease := &nclient4.Lease{} - lease.ACK = response - lease.Offer = message - lease.CreationTime = time.Now() + defer dhclient.Close() - return lease, nil + return dhclient.Renew(context.TODO(), c.lease) } diff --git a/pkg/vip/egress.go b/pkg/vip/egress.go new file mode 100644 index 00000000..36138194 --- /dev/null +++ b/pkg/vip/egress.go @@ -0,0 +1,272 @@ +package vip + +import ( + "fmt" + "strings" + + iptables "github.com/kube-vip/kube-vip/pkg/iptables" + log "github.com/sirupsen/logrus" + + ct "github.com/florianl/go-conntrack" +) + +//Notes: https://github.com/cloudnativelabs/kube-router/issues/434 + +// This file contains all of the functions related to changing SNAT for a +// pod so that it appears to be coming from a VIP. + +// 1. Create a new chain in the mangle table +// 2. Ignore (or RETURN) packets going to a service or other pod address +// 3. Mark packets coming from a pod +// 4. Add a rule in the mangle chain PREROUTING to jump to the new chain created above +// 5. Mark packets going through this host (not originating) (might not be needed) +// 6. 
Perform source nating on marked packets + +// Create new iptables client +// Test to find out what exists before hand + +const MangleChainName = "KUBE-VIP-EGRESS" +const Comment = "a3ViZS12aXAK=kube-vip" + +type Egress struct { + ipTablesClient *iptables.IPTables + comment string +} + +func CreateIptablesClient(nftables bool, namespace string) (*Egress, error) { + log.Infof("[egress] Creating an iptables client, nftables mode [%t]", nftables) + e := new(Egress) + var err error + e.ipTablesClient, err = iptables.New(iptables.EnableNFTables(nftables)) + e.comment = Comment + "-" + namespace + return e, err +} + +func (e *Egress) CheckMangleChain(name string) (bool, error) { + log.Infof("[egress] Checking for Chain [%s]", name) + return e.ipTablesClient.ChainExists("mangle", name) +} + +func (e *Egress) DeleteMangleChain(name string) error { + return e.ipTablesClient.ClearAndDeleteChain("mangle", name) +} + +func (e *Egress) DeleteManglePrerouting(name string) error { + return e.ipTablesClient.Delete("mangle", "PREROUTING", "-j", name) +} + +func (e *Egress) DeleteMangleMarking(podIP, name string) error { + log.Infof("[egress] Stopping marking packets on network [%s]", podIP) + + exists, _ := e.ipTablesClient.Exists("mangle", name, "-s", podIP, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Mangle rule for [%s]", podIP) + } + return e.ipTablesClient.Delete("mangle", name, "-s", podIP, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) DeleteSourceNat(podIP, vip string) error { + log.Infof("[egress] Removing source nat from [%s] => [%s]", podIP, vip) + + exists, _ := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Nat rule for [%s]", podIP) + } + return e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) DeleteSourceNatForDestinationPort(podIP, vip, port, proto string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s]", podIP, vip) + + exists, _ := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Nat rule for [%s], with destination port [%s]", podIP, port) + } + return e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) CreateMangleChain(name string) error { + + log.Infof("[egress] Creating Chain [%s]", name) + // Creates a new chain in the mangle table + return e.ipTablesClient.NewChain("mangle", name) + +} +func (e *Egress) AppendReturnRulesForDestinationSubnet(name, subnet string) error { + log.Infof("[egress] Adding jump for subnet [%s] to RETURN to previous chain/rules", subnet) + exists, _ := e.ipTablesClient.Exists("mangle", name, "-d", subnet, "-j", "RETURN", "-m", "comment", "--comment", e.comment) + if !exists { + return e.ipTablesClient.Append("mangle", name, "-d", subnet, "-j", "RETURN", "-m", "comment", "--comment", 
e.comment) + } + return nil +} + +func (e *Egress) AppendReturnRulesForMarking(name, subnet string) error { + log.Infof("[egress] Marking packets on network [%s]", subnet) + exists, _ := e.ipTablesClient.Exists("mangle", name, "-s", subnet, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + if !exists { + return e.ipTablesClient.Append("mangle", name, "-s", subnet, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + } + return nil +} + +func (e *Egress) InsertMangeTableIntoPrerouting(name string) error { + log.Infof("[egress] Adding jump from mangle prerouting to [%s]", name) + if exists, err := e.ipTablesClient.Exists("mangle", "PREROUTING", "-j", name, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("mangle", "PREROUTING", "-j", name, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("mangle", "PREROUTING", 1, "-j", name, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) InsertSourceNat(vip, podIP string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s]", podIP, vip) + if exists, err := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("nat", "POSTROUTING", 1, "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) InsertSourceNatForDestinationPort(vip, podIP, port, proto string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s], with destination port [%s]", podIP, vip, port) + if exists, err := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("nat", "POSTROUTING", 1, "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) +} + +func DeleteExistingSessions(sessionIP string, destination bool) error { + + nfct, err := ct.Open(&ct.Config{}) + if err != nil { + log.Errorf("could not create nfct: %v", err) + return err + } + defer nfct.Close() + sessions, err := nfct.Dump(ct.Conntrack, ct.IPv4) + if err != nil { + log.Errorf("could not dump sessions: %v", err) + return err + } + // by default we only clear source (i.e. 
connections going from the vip (egress)) + if !destination { + for _, session := range sessions { + //fmt.Printf("Looking for [%s] found [%s]\n", podIP, session.Origin.Dst.String()) + + if session.Origin.Src.String() == sessionIP /*&& *session.Origin.Proto.DstPort == uint16(destinationPort)*/ { + //fmt.Printf("Source -> %s Destination -> %s:%d\n", session.Origin.Src.String(), session.Origin.Dst.String(), *session.Origin.Proto.DstPort) + err = nfct.Delete(ct.Conntrack, ct.IPv4, session) + if err != nil { + log.Errorf("could not delete sessions: %v", err) + } + } + } + } else { + // This will clear any "dangling" outbound connections. + for _, session := range sessions { + //fmt.Printf("Looking for [%s] found [%s]\n", podIP, session.Origin.Dst.String()) + + if session.Origin.Dst.String() == sessionIP /*&& *session.Origin.Proto.DstPort == uint16(destinationPort)*/ { + //fmt.Printf("Source -> %s Destination -> %s:%d\n", session.Origin.Src.String(), session.Origin.Dst.String(), *session.Origin.Proto.DstPort) + err = nfct.Delete(ct.Conntrack, ct.IPv4, session) + if err != nil { + log.Errorf("could not delete sessions: %v", err) + } + } + } + } + + return nil +} + +// Debug functions + +func (e *Egress) DumpChain(name string) error { + log.Infof("Dumping chain [%s]", name) + c, err := e.ipTablesClient.List("mangle", name) + if err != nil { + return err + } + for x := range c { + log.Infof("Rule -> %s", c[x]) + } + return nil +} + +func (e *Egress) CleanIPtables() error { + natRules, err := e.ipTablesClient.List("nat", "POSTROUTING") + if err != nil { + return err + } + foundNatRules := e.findRules(natRules) + log.Warnf("[egress] Cleaning [%d] dangling postrouting nat rules", len(foundNatRules)) + for x := range foundNatRules { + err = e.ipTablesClient.Delete("nat", "POSTROUTING", foundNatRules[x][2:]...) + if err != nil { + log.Errorf("[egress] Error removing rule [%v]", err) + } + } + exists, err := e.CheckMangleChain(MangleChainName) + if err != nil { + log.Debugf("[egress] No Mangle chain exists [%v]", err) + } + if exists { + mangleRules, err := e.ipTablesClient.List("mangle", MangleChainName) + if err != nil { + return err + } + foundNatRules = e.findRules(mangleRules) + log.Warnf("[egress] Cleaning [%d] dangling prerouting mangle rules", len(foundNatRules)) + for x := range foundNatRules { + err = e.ipTablesClient.Delete("mangle", MangleChainName, foundNatRules[x][2:]...) 
+ if err != nil { + log.Errorf("[egress] Error removing rule [%v]", err) + } + } + + // For unknown reasons RHEL and the nftables wrapper sometimes leave dangling rules + // So we shall nuke them from orbit (just to be sure) + + // err = e.ipTablesClient.ClearChain("mangle", MangleChainName) + // if err != nil { + // log.Errorf("[egress] Error removing flushing table [%v]", err) + // } + } else { + log.Warnf("No existing mangle chain [%s] exists", MangleChainName) + } + return nil +} + +func (e *Egress) findRules(rules []string) [][]string { + var foundRules [][]string + + for i := range rules { + r := strings.Split(rules[i], " ") + for x := range r { + if r[x] == "\""+e.comment+"\"" { + // Remove the quotes around the comment + r[x] = strings.Trim(r[x], "\"") + foundRules = append(foundRules, r) + } + } + } + + return foundRules +} diff --git a/pkg/vip/egress_test.go b/pkg/vip/egress_test.go new file mode 100644 index 00000000..8466e437 --- /dev/null +++ b/pkg/vip/egress_test.go @@ -0,0 +1,36 @@ +package vip + +import ( + "fmt" + "reflect" + "testing" +) + +func Test_findRules(t *testing.T) { + e := Egress{comment: Comment + "-" + "default"} + type args struct { + rules []string + } + tests := []struct { + name string + args args + want [][]string + }{ + { + "test", + args{[]string{ + "-A PREROUTING -m comment --comment \"cali:6gwbT8clXdHdC1b1\" -j cali-PREROUTING", + fmt.Sprintf("-A KUBE-VIP-EGRESS -s 172.17.88.190/32 -m comment --comment \"%s\" -j MARK --set-xmark 0x40/0x40", e.comment), + fmt.Sprintf("-A POSTROUTING -m comment --comment \"%s\" -j RETURN", e.comment), + }}, + [][]string{{"-A", "KUBE-VIP-EGRESS", "-s", "172.17.88.190/32", "-m", "comment", "--comment", e.comment, "-j", "MARK", "--set-xmark", "0x40/0x40"}, {"-A", "POSTROUTING", "-m", "comment", "--comment", e.comment, "-j", "RETURN"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := e.findRules(tt.args.rules); !reflect.DeepEqual(got, tt.want) { + t.Errorf("findRules() = \n%v, want \n%v", got, tt.want) + } + }) + } +} diff --git a/pkg/vip/ndp.go b/pkg/vip/ndp.go index 73fec41e..d7ebec35 100644 --- a/pkg/vip/ndp.go +++ b/pkg/vip/ndp.go @@ -3,6 +3,7 @@ package vip import ( "fmt" "net" + "net/netip" "github.com/mdlayher/ndp" @@ -24,7 +25,7 @@ func NewNDPResponder(ifaceName string) (*NdpResponder, error) { } // Use link-local address as the source IPv6 address for NDP communications. - conn, _, err := ndp.Dial(iface, ndp.LinkLocal) + conn, _, err := ndp.Listen(iface, ndp.LinkLocal) if err != nil { return nil, fmt.Errorf("creating NDP responder for %q: %s", iface.Name, err) } @@ -44,16 +45,16 @@ func (n *NdpResponder) Close() error { // SendGratuitous broadcasts an NDP update or returns error if encountered. 
func (n *NdpResponder) SendGratuitous(address string) error { - ip := net.ParseIP(address) - if ip == nil { + ip, err := netip.ParseAddr(address) + if err != nil { return fmt.Errorf("failed to parse address %s", ip) } log.Infof("Broadcasting NDP update for %s (%s) via %s", address, n.hardwareAddr, n.intf) - return n.advertise(net.IPv6linklocalallnodes, ip, true) + return n.advertise(netip.IPv6LinkLocalAllNodes(), ip, true) } -func (n *NdpResponder) advertise(dst, target net.IP, gratuitous bool) error { +func (n *NdpResponder) advertise(dst, target netip.Addr, gratuitous bool) error { m := &ndp.NeighborAdvertisement{ Solicited: !gratuitous, Override: gratuitous, // Should clients replace existing cache entries diff --git a/pkg/vip/util.go b/pkg/vip/util.go index a7335a46..8608c5f1 100644 --- a/pkg/vip/util.go +++ b/pkg/vip/util.go @@ -1,12 +1,15 @@ package vip import ( + "context" + "crypto/rand" "fmt" "net" "strings" "syscall" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" ) @@ -85,3 +88,41 @@ func GetDefaultGatewayInterface() (*net.Interface, error) { return nil, errors.New("Unable to find default route") } + +// MonitorDefaultInterface monitors the default interface and catches deletion of the default route +func MonitorDefaultInterface(ctx context.Context, defaultIF *net.Interface) error { + routeCh := make(chan netlink.RouteUpdate) + if err := netlink.RouteSubscribe(routeCh, ctx.Done()); err != nil { + return fmt.Errorf("subscribe route failed, error: %w", err) + } + + for { + select { + case r := <-routeCh: + log.Debugf("type: %d, route: %+v", r.Type, r.Route) + if r.Type == syscall.RTM_DELROUTE && (r.Dst == nil || r.Dst.String() == "0.0.0.0/0") && r.LinkIndex == defaultIF.Index { + return fmt.Errorf("default route deleted and the default interface may be invalid") + } + case <-ctx.Done(): + return nil + } + } +} + +func GenerateMac() (mac string) { + buf := make([]byte, 3) + _, err := rand.Read(buf) + if err != nil { + return + } + + /** + * The first 3 bytes need to match a real manufacturer + * you can refer to the following lists for examples: + * - https://gist.github.com/aallan/b4bb86db86079509e6159810ae9bd3e4 + * - https://macaddress.io/database-download + */ + mac = fmt.Sprintf("%s:%s:%s:%02x:%02x:%02x", "00", "00", "6C", buf[0], buf[1], buf[2]) + log.Infof("Generated mac: %s", mac) + return mac +} diff --git a/pkg/wireguard/architecture.md b/pkg/wireguard/architecture.md new file mode 100644 index 00000000..47de96f9 --- /dev/null +++ b/pkg/wireguard/architecture.md @@ -0,0 +1,29 @@ +# Wireguard Architecture + +This brief document is largely for my own notes about how this functionality is added to `kube-vip`. + +## Overview + +- New Flags +- Startup +- Secret(s) + +### New Flags + +A `--wireguard` flag or `vip_wireguard` environment variable determines whether Wireguard mode is enabled; if it is, kube-vip will start the wireguard manager process. + +### Startup + +This requires `kube-vip` to run as a DaemonSet, as it needs to read existing data (secrets) from inside the cluster.
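As a rough illustration of this startup step (not part of this changeset), the sketch below shows how a pod running in the DaemonSet could read that secret with client-go and hand its values to `wireguard.ConfigureInterface` from `pkg/wireguard/wireguard.go`. The secret name, namespace and keys follow the Secrets section below; the in-cluster wiring and error handling are assumptions.

```
package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"

	"github.com/kube-vip/kube-vip/pkg/wireguard"
)

func main() {
	// Use the service account credentials mounted into the DaemonSet pod.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("in-cluster config: %v", err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatalf("building clientset: %v", err)
	}

	// Read the "wireguard" secret created in the Secrets section below.
	s, err := clientset.CoreV1().Secrets("kube-system").Get(context.TODO(), "wireguard", metav1.GetOptions{})
	if err != nil {
		log.Fatalf("reading wireguard secret: %v", err)
	}

	// Configure the local wireguard peer from the secret's values.
	if err := wireguard.ConfigureInterface(
		string(s.Data["privateKey"]),
		string(s.Data["peerPublicKey"]),
		string(s.Data["peerEndpoint"]),
	); err != nil {
		log.Fatalf("configuring wireguard: %v", err)
	}
}
```

For this to work the DaemonSet's service account would also need RBAC permission to get secrets in `kube-system`.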
+ +### Secrets + +Create a private key for the cluster: + +``` +PRIKEY=$(wg genkey) +PUBKEY=$(echo $PRIKEY | wg pubkey) +PEERKEY=$(sudo wg show wg0 public-key) +echo "kubectl create -n kube-system secret generic wireguard --from-literal=privateKey=$PRIKEY --from-literal=peerPublicKey=$PEERKEY --from-literal=peerEndpoint=192.168.0.179" +sudo wg set wg0 peer $PUBKEY allowed-ips 10.0.0.0/8 +``` diff --git a/pkg/wireguard/wireguard.go b/pkg/wireguard/wireguard.go new file mode 100644 index 00000000..0eac845b --- /dev/null +++ b/pkg/wireguard/wireguard.go @@ -0,0 +1,64 @@ +package wireguard + +import ( + "fmt" + "net" + "os" + "time" + + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func ConfigureInterface(priKey, peerPublicKey, endpoint string) error { + + client, err := wgctrl.New() + if err != nil { + return fmt.Errorf("failed to open client: %v", err) + } + defer client.Close() + + pri, err := wgtypes.ParseKey(priKey) + if err != nil { + return fmt.Errorf("failed to generate private key: %v", err) + } + + pub, err := wgtypes.ParseKey(peerPublicKey) // Should be generated by the remote peer + if err != nil { + return fmt.Errorf("failed to parse public key: %v", err) + } + + //log.Printf("Public Key [%s]", pri.PublicKey()) + + port := 51820 + ka := 20 * time.Second + + conf := wgtypes.Config{ + PrivateKey: &pri, + ListenPort: &port, + ReplacePeers: true, + Peers: []wgtypes.PeerConfig{{ + PublicKey: pub, + Remove: false, + UpdateOnly: false, + Endpoint: &net.UDPAddr{ + IP: net.ParseIP(endpoint), + Port: 51820, + }, + PersistentKeepaliveInterval: &ka, + ReplaceAllowedIPs: true, + AllowedIPs: []net.IPNet{{ + IP: net.ParseIP("10.0.0.0"), + Mask: net.ParseIP("0.0.0.0").DefaultMask(), + }}, + }}, + } + + if err := client.ConfigureDevice("wg0", conf); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("wg0 doesn't exist [%s]", err) + } + return fmt.Errorf("unknown config error: %v", err) + } + return nil +} diff --git a/testing/e2e/e2e/Dockerfile b/testing/e2e/e2e/Dockerfile new file mode 100644 index 00000000..74db1526 --- /dev/null +++ b/testing/e2e/e2e/Dockerfile @@ -0,0 +1,16 @@ +# syntax=docker/dockerfile:experimental + +FROM golang:1.20-alpine as dev +RUN apk add --no-cache git ca-certificates +RUN adduser -D appuser +COPY . /src/ +WORKDIR /src + +ENV GO111MODULE=on +RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ + --mount=type=cache,sharing=locked,id=goroot,target=/root/.cache/go-build \ + CGO_ENABLED=0 GOOS=linux go build -ldflags '-s -w -extldflags -static' -o e2eClient /src/main.go + +FROM scratch +COPY --from=dev /src/e2eClient / +CMD ["/e2eClient"] diff --git a/testing/e2e/e2e/Makefile b/testing/e2e/e2e/Makefile new file mode 100644 index 00000000..2ea6e738 --- /dev/null +++ b/testing/e2e/e2e/Makefile @@ -0,0 +1,61 @@ + +SHELL := /bin/bash + +# The name of the executable (default is current directory name) +TARGET := e2e +.DEFAULT_GOAL: $(TARGET) + +# These will be provided to the target +VERSION := 0.0.1 +BUILD := `git rev-parse HEAD` + +# Operating System Default (LINUX) +TARGETOS=linux + +# Use linker flags to provide version/build settings to the target +LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -s" + +# go source files, ignore vendor directory +SRC = $(shell find . 
-type f -name '*.go' -not -path "./vendor/*") + +DOCKERTAG ?= $(VERSION) +REPOSITORY = plndr + +.PHONY: all build clean install uninstall fmt simplify check run + +all: check install + +$(TARGET): $(SRC) + @go build $(LDFLAGS) -o $(TARGET) + +build: $(TARGET) + @true + +clean: + @rm -f $(TARGET) + +install: + @echo Building and Installing project + @go install $(LDFLAGS) + +uninstall: clean + @rm -f $$(which ${TARGET}) + +fmt: + @gofmt -l -w $(SRC) + +docker: + # @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @echo New Multi Architecture Docker image created + +simplify: + @gofmt -s -l -w $(SRC) + +check: + @test -z $(shell gofmt -l main.go | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'" + @for d in $$(go list ./... | grep -v /vendor/); do golint $${d}; done + @go tool vet ${SRC} + +run: install + @$(TARGET) diff --git a/testing/e2e/e2e/go.mod b/testing/e2e/e2e/go.mod new file mode 100644 index 00000000..d25994bc --- /dev/null +++ b/testing/e2e/e2e/go.mod @@ -0,0 +1,7 @@ +module github.com/kube-vip/kube-vip/testing/e2e/servicesClient + +go 1.19 + +require github.com/sirupsen/logrus v1.9.0 + +require golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect diff --git a/testing/e2e/e2e/go.sum b/testing/e2e/e2e/go.sum new file mode 100644 index 00000000..ed655373 --- /dev/null +++ b/testing/e2e/e2e/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/testing/e2e/e2e/main.go b/testing/e2e/e2e/main.go new file mode 100644 index 00000000..61a620ba --- /dev/null +++ b/testing/e2e/e2e/main.go @@ -0,0 +1,66 @@ +package main + +// This is largely to test outbound (egress) connections +import ( + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + log "github.com/sirupsen/logrus" +) + +func main() { + // Lookup environment variables + mode, exists := os.LookupEnv("E2EMODE") + if !exists { + log.Fatal("The environment variable E2ESERVER, was not set") + } + + switch mode { + case strings.ToUpper("SERVER"): + 
+		http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+			fmt.Fprintf(w, "Hello!")
+		})
+
+		log.Info("Starting server at port 80")
+		if err := http.ListenAndServe(":80", nil); err != nil {
+			log.Fatal(err)
+		}
+	case strings.ToUpper("CLIENT"):
+		address, exists := os.LookupEnv("E2EADDRESS")
+		if !exists {
+			log.Fatal("The environment variable E2EADDRESS, was not set")
+		}
+		for {
+
+			// Connect to e2e endpoint with a second timeout
+			conn, err := net.DialTimeout("tcp", address+":12345", time.Second)
+			if err != nil {
+				log.Fatalf("Dial failed: %v", err.Error())
+			}
+			_, err = conn.Write([]byte("The Grid, a digital frontier"))
+			if err != nil {
+				log.Fatalf("Write data failed: %v", err.Error())
+			}
+
+			// buffer to get data
+			received := make([]byte, 1024)
+			_, err = conn.Read(received)
+			if err != nil {
+				log.Fatalf("Read data failed: %v", err.Error())
+			}
+
+			log.Infof("Received message: %s", string(received))
+
+			conn.Close()
+			// Wait for a second and connect again
+			time.Sleep(time.Second)
+		}
+	default:
+		log.Fatalf("Unknown mode [%s]", mode)
+	}
+
+}
diff --git a/testing/e2e/e2e_suite_test.go b/testing/e2e/e2e_suite_test.go
index 6ac6be60..a9092081 100644
--- a/testing/e2e/e2e_suite_test.go
+++ b/testing/e2e/e2e_suite_test.go
@@ -4,17 +4,12 @@
 package e2e_test
 
 import (
-	"fmt"
-	"os/exec"
 	"testing"
-	"time"
 
-	kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4"
-	"sigs.k8s.io/kind/pkg/cluster"
-
-	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/ginkgo/v2"
 	. "github.com/onsi/gomega"
-	"github.com/onsi/gomega/gexec"
+
+	"github.com/kube-vip/kube-vip/testing/e2e"
 )
 
 func TestE2E(t *testing.T) {
@@ -22,43 +17,9 @@ func TestE2E(t *testing.T) {
 	RunSpecs(t, "E2E Suite")
 }
 
-var _ = SynchronizedBeforeSuite(func() []byte {
-	ensureKindNetwork()
-	return []byte{}
-}, func(_ []byte) {})
-
-func ensureKindNetwork() {
-	By("checking if the Docker \"kind\" network exists")
-	cmd := exec.Command("docker", "inspect", "kind")
-	session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
-	Expect(err).NotTo(HaveOccurred())
-	Eventually(session).Should(gexec.Exit())
-	if session.ExitCode() == 0 {
-		return
-	}
-
-	By("Docker \"kind\" network was not found. 
Creating dummy Kind cluster to ensure creation") - clusterConfig := kindconfigv1alpha4.Cluster{ - Networking: kindconfigv1alpha4.Networking{ - IPFamily: kindconfigv1alpha4.IPv6Family, - }, - } - - provider := cluster.NewProvider( - cluster.ProviderWithDocker(), - ) - dummyClusterName := fmt.Sprintf("dummy-cluster-%d", time.Now().Unix()) - Expect(provider.Create( - dummyClusterName, - cluster.CreateWithV1Alpha4Config(&clusterConfig), - )).To(Succeed()) - - By("deleting dummy Kind cluster") - Expect(provider.Delete(dummyClusterName, "")) - - By("checking if the Docker \"kind\" network was successfully created") - cmd = exec.Command("docker", "inspect", "kind") - session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter) - Expect(err).NotTo(HaveOccurred()) - Eventually(session).Should(gexec.Exit(0)) -} +var _ = SynchronizedBeforeSuite( + func() { + e2e.EnsureKindNetwork() + }, + func() {}, +) diff --git a/testing/e2e/e2e_test.go b/testing/e2e/e2e_test.go index 68e7171f..1de6d5f7 100644 --- a/testing/e2e/e2e_test.go +++ b/testing/e2e/e2e_test.go @@ -4,14 +4,10 @@ package e2e_test import ( - "bufio" "bytes" "crypto/tls" - "encoding/binary" "fmt" - "io" "io/ioutil" - "net" "net/http" "os" "os/exec" @@ -24,19 +20,15 @@ import ( "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" "sigs.k8s.io/kind/pkg/cluster" - "sigs.k8s.io/kind/pkg/cmd" - load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image" "sigs.k8s.io/kind/pkg/log" - . "github.com/onsi/ginkgo" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" "github.com/onsi/gomega/gexec" -) -type kubevipManifestValues struct { - ControlPlaneVIP string - ImagePath string -} + "github.com/kube-vip/kube-vip/testing/e2e" +) var _ = Describe("kube-vip broadcast neighbor", func() { var ( @@ -49,7 +41,7 @@ var _ = Describe("kube-vip broadcast neighbor", func() { BeforeEach(func() { klog.SetOutput(GinkgoWriter) - logger = TestLogger{} + logger = e2e.TestLogger{} imagePath = os.Getenv("E2E_IMAGE_PATH") @@ -99,6 +91,7 @@ var _ = Describe("kube-vip broadcast neighbor", func() { for i := 0; i < 3; i++ { clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, ExtraMounts: []kindconfigv1alpha4.Mount{ { HostPath: manifestPath, @@ -113,9 +106,9 @@ var _ = Describe("kube-vip broadcast neighbor", func() { defer manifestFile.Close() - ipv4VIP = generateIPv4VIP() + ipv4VIP = e2e.GenerateIPv4VIP() - Expect(kubeVIPManifestTemplate.Execute(manifestFile, kubevipManifestValues{ + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ ControlPlaneVIP: ipv4VIP, ImagePath: imagePath, })).To(Succeed()) @@ -126,7 +119,7 @@ var _ = Describe("kube-vip broadcast neighbor", func() { createKindCluster(logger, &clusterConfig, clusterName) By(withTimestamp("loading local docker image to kind cluster")) - loadDockerImageToKind(logger, imagePath, clusterName) + e2e.LoadDockerImageToKind(logger, imagePath, clusterName) By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv4 VIP")) // Allow enough time for control plane nodes to load the docker image and @@ -171,14 +164,14 @@ var _ = Describe("kube-vip broadcast neighbor", func() { }) } - ipv6VIP = generateIPv6VIP() + ipv6VIP = e2e.GenerateIPv6VIP() manifestFile, err := os.Create(manifestPath) Expect(err).NotTo(HaveOccurred()) defer manifestFile.Close() - Expect(kubeVIPManifestTemplate.Execute(manifestFile, 
kubevipManifestValues{ + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ ControlPlaneVIP: ipv6VIP, ImagePath: imagePath, })).To(Succeed()) @@ -189,7 +182,7 @@ var _ = Describe("kube-vip broadcast neighbor", func() { createKindCluster(logger, &clusterConfig, clusterName) By(withTimestamp("loading local docker image to kind cluster")) - loadDockerImageToKind(logger, imagePath, clusterName) + e2e.LoadDockerImageToKind(logger, imagePath, clusterName) By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv6 VIP")) // Allow enough time for control plane nodes to load the docker image and @@ -211,19 +204,14 @@ func createKindCluster(logger log.Logger, config *v1alpha4.Cluster, clusterName cluster.ProviderWithLogger(logger), cluster.ProviderWithDocker(), ) - + format.UseStringerRepresentation = true // Otherwise error stacks have binary format. Expect(provider.Create( clusterName, cluster.CreateWithV1Alpha4Config(config), + cluster.CreateWithRetain(os.Getenv("E2E_PRESERVE_CLUSTER") == "true"), // If create fails, we'll need the cluster alive to debug )).To(Succeed()) } -func loadDockerImageToKind(logger log.Logger, imagePath string, clusterName string) { - loadImageCmd := load.NewCommand(logger, cmd.StandardIOStreams()) - loadImageCmd.SetArgs([]string{"--name", clusterName, imagePath}) - Expect(loadImageCmd.Execute()).To(Succeed()) -} - func assertControlPlaneIsRoutable(controlPlaneVIP string, transportTimeout, eventuallyTimeout time.Duration) { if strings.Contains(controlPlaneVIP, ":") { controlPlaneVIP = fmt.Sprintf("[%s]", controlPlaneVIP) @@ -278,104 +266,3 @@ func killLeader(leaderIPAddr string, clusterName string) { func withTimestamp(text string) string { return fmt.Sprintf("%s: %s", time.Now(), text) } - -func getKindNetworkSubnetCIDRs() []string { - cmd := exec.Command( - "docker", "inspect", "kind", - "--format", `{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}`, - ) - cmdOut := new(bytes.Buffer) - cmd.Stdout = cmdOut - Expect(cmd.Run()).To(Succeed(), "The Docker \"kind\" network was not found.") - reader := bufio.NewReader(cmdOut) - - cidrs := []string{} - for { - line, readErr := reader.ReadString('\n') - if readErr != nil && readErr != io.EOF { - Expect(readErr).NotTo(HaveOccurred(), "Error finding subnet CIDRs in the Docker \"kind\" network") - } - - cidrs = append(cidrs, strings.TrimSpace(line)) - if readErr == io.EOF { - break - } - } - - return cidrs -} - -func generateIPv4VIP() string { - cidrs := getKindNetworkSubnetCIDRs() - - for _, cidr := range cidrs { - ip, ipNet, parseErr := net.ParseCIDR(cidr) - Expect(parseErr).NotTo(HaveOccurred()) - - if ip.To4() != nil { - mask := binary.BigEndian.Uint32(ipNet.Mask) - start := binary.BigEndian.Uint32(ipNet.IP) - end := (start & mask) | (^mask) - - chosenVIP := make([]byte, 4) - binary.BigEndian.PutUint32(chosenVIP, end-5) - return net.IP(chosenVIP).String() - } - } - Fail("Could not find any IPv4 CIDRs in the Docker \"kind\" network") - return "" -} - -func generateIPv6VIP() string { - cidrs := getKindNetworkSubnetCIDRs() - - for _, cidr := range cidrs { - ip, ipNet, parseErr := net.ParseCIDR(cidr) - Expect(parseErr).NotTo(HaveOccurred()) - - if ip.To4() == nil { - lowerMask := binary.BigEndian.Uint64(ipNet.Mask[8:]) - lowerStart := binary.BigEndian.Uint64(ipNet.IP[8:]) - lowerEnd := (lowerStart & lowerMask) | (^lowerMask) - - chosenVIP := make([]byte, 16) - // Copy upper half into chosenVIP - copy(chosenVIP, ipNet.IP[0:8]) - // Copy lower 
half into chosenVIP
-			binary.BigEndian.PutUint64(chosenVIP[8:], lowerEnd-5)
-			return net.IP(chosenVIP).String()
-		}
-	}
-	Fail("Could not find any IPv6 CIDRs in the Docker \"kind\" network")
-	return ""
-}
-
-type TestLogger struct{}
-
-func (t TestLogger) Warnf(format string, args ...interface{}) {
-	klog.Warningf(format, args...)
-}
-
-func (t TestLogger) Warn(message string) {
-	klog.Warning(message)
-}
-
-func (t TestLogger) Error(message string) {
-	klog.Error(message)
-}
-
-func (t TestLogger) Errorf(format string, args ...interface{}) {
-	klog.Errorf(format, args...)
-}
-
-func (t TestLogger) V(level log.Level) log.InfoLogger {
-	return TestInfoLogger{Verbose: klog.V(klog.Level(level))}
-}
-
-type TestInfoLogger struct {
-	klog.Verbose
-}
-
-func (t TestInfoLogger) Info(message string) {
-	t.Verbose.Info(message)
-}
diff --git a/testing/e2e/etcd/README.md b/testing/e2e/etcd/README.md
new file mode 100644
index 00000000..d7e0f3c5
--- /dev/null
+++ b/testing/e2e/etcd/README.md
@@ -0,0 +1,32 @@
+# Running etcd Tests
+## Prerequisites:
+* Docker
+
+If you want to use an image that only exists in your local docker cache, use this env var (modify registry and tag accordingly):
+```sh
+export E2E_IMAGE_PATH=plndr/kube-vip:v0.6.2
+```
+
+If you want to preserve the etcd nodes after a test run, use the following:
+```sh
+export E2E_PRESERVE_CLUSTER=true
+```
+
+Note that you'll need to delete these nodes before the tests can run again; this option is intended only for debugging. You can use `kind delete cluster` or just `docker rm` the containers.
+
+To run the tests:
+```sh
+ginkgo -vv --tags=e2e testing/e2e/etcd
+
+```
+
+The E2E tests:
+1. Start 3 kind nodes (using docker)
+2. Load the local docker image into kind
+3. Init the etcd cluster and join all nodes
+4. Verify the etcd API can be accessed through the VIP
+    1. This proves leader election through etcd in kube-vip is working.
+5. Remove the first node (which is probably the VIP leader)
+6. Verify the etcd API can be accessed through the VIP again
+
+> Note: this has only been tested on Linux but it might work on Mac
\ No newline at end of file
diff --git a/testing/e2e/etcd/cluster.go b/testing/e2e/etcd/cluster.go
new file mode 100644
index 00000000..0cf68673
--- /dev/null
+++ b/testing/e2e/etcd/cluster.go
@@ -0,0 +1,341 @@
+//go:build e2e
+// +build e2e
+
+package etcd
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+	"time"
+
+	. 
"github.com/onsi/gomega" + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + "golang.org/x/exp/slices" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + + "github.com/kube-vip/kube-vip/testing/e2e" +) + +type ClusterSpec struct { + Nodes int + Name string + VIP string + KubeVIPImage string + KubeVIPpManifestPath string + KubeletManifestPath string + KubeletFlagsPath string + EtcdCertsFolder string + Logger e2e.TestLogger +} + +type Cluster struct { + *ClusterSpec + Nodes []nodes.Node + + provider *cluster.Provider +} + +func CreateCluster(ctx context.Context, spec *ClusterSpec) *Cluster { + c := &Cluster{ + ClusterSpec: spec, + } + + c.provider = cluster.NewProvider( + cluster.ProviderWithLogger(spec.Logger), + cluster.ProviderWithDocker(), + ) + + c.Logger.Printf("Creating kind nodes") + c.initKindCluster() + + c.Logger.Printf("Loading kube-vip image into nodes") + e2e.LoadDockerImageToKind(spec.Logger, spec.KubeVIPImage, spec.Name) + + c.Logger.Printf("Starting etcd cluster") + c.initEtcd(ctx) + + c.Logger.Printf("Checking 1 node etcd is available through VIP") + c.VerifyEtcdThroughVIP(ctx, 15*time.Second) + + c.Logger.Printf("Adding the rest of the nodes to the etcd cluster") + c.joinRestOfNodes(ctx) + + c.Logger.Printf("Checking health for all nodes") + for _, node := range c.Nodes { + c.expectEtcdNodeHealthy(ctx, node, 15*time.Second) + } + + c.Logger.Printf("Checking %d nodes etcd is available through VIP", c.ClusterSpec.Nodes) + c.VerifyEtcdThroughVIP(ctx, 15*time.Second) + + return c +} + +func (c *Cluster) initKindCluster() { + kindCluster := &kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv4Family, + }, + } + + for i := 0; i < c.ClusterSpec.Nodes; i++ { + kindCluster.Nodes = append(kindCluster.Nodes, kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, + ExtraMounts: []kindconfigv1alpha4.Mount{ + { + HostPath: c.ClusterSpec.KubeVIPpManifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + }, + { + HostPath: c.ClusterSpec.KubeletManifestPath, + ContainerPath: "/var/lib/kubelet/config.yaml", + }, + { + HostPath: c.ClusterSpec.KubeletFlagsPath, + ContainerPath: "/etc/default/kubelet", + }, + }, + }) + } + + Expect(c.provider.Create( + c.Name, + cluster.CreateWithV1Alpha4Config(kindCluster), + cluster.CreateWithRetain(true), + cluster.CreateWithStopBeforeSettingUpKubernetes(true), + cluster.CreateWithWaitForReady(2*time.Minute), + cluster.CreateWithNodeImage("public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/node:v1.26.7-eks-d-1-26-16-eks-a-47"), + )).To(Succeed()) +} + +func (c *Cluster) initEtcd(ctx context.Context) { + var err error + c.Nodes, err = c.provider.ListInternalNodes(c.Name) + slices.SortFunc(c.Nodes, func(a, b nodes.Node) int { + aName := a.String() + bName := b.String() + if aName < bName { + return 1 + } else if aName > bName { + return -1 + } + + return 0 + }) + + Expect(err).NotTo(HaveOccurred()) + firstNode := c.Nodes[0] + + createCerts(firstNode) + + // We need to run all phases individually to be able to re-run the health phase + // In CI it can take longer than the 30 seconds timeout that is hardcoded in etcdadm + // If etcdadm added the option to configure this timeout, we could change this to just + // call etcdadm init to run all phases. 
+ + flags := []string{ + "--init-system", "kubelet", + "--certs-dir", "/etc/kubernetes/pki/etcd", + "--server-cert-extra-sans", strings.Join([]string{"etcd", c.VIP, e2e.NodeIPv4(firstNode)}, ","), + "--version", "3.5.8-eks-1-26-16", + "--image-repository", "public.ecr.aws/eks-distro/etcd-io/etcd", + } + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "install", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "certificates", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "snapshot", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "configure", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "start", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "etcdctl", flags)..., + ) + + Eventually(func() error { + return runInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "health", flags)..., + ) + }, 3*time.Minute).Should(Succeed(), "etcd should become healthy in node in less than 3 minutes") + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "post-init-instructions", flags)..., + ) + + bindEtcdListenerToAllIPs(firstNode) + + e2e.CopyFolderFromNodeToDisk(firstNode, "/etc/kubernetes/pki/etcd", c.EtcdCertsFolder) + + c.expectEtcdNodeHealthy(ctx, firstNode, 15*time.Second) +} + +func runInNode(node nodes.Node, command string, args ...string) error { + return e2e.PrintCommandOutputIfErr(node.Command(command, args...).Run()) +} + +func initArgsForPhase(command, phaseName string, flags []string) []string { + c := make([]string, 0, 3+len(flags)) + c = append(c, command, "phase", phaseName) + c = append(c, flags...) + return c +} + +func (c *Cluster) joinRestOfNodes(ctx context.Context) { + for _, node := range c.Nodes[1:] { + c.joinNode(ctx, c.Nodes[0], node) + } +} + +func (c *Cluster) joinNode(ctx context.Context, firstNode, node nodes.Node) { + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/ca.crt") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/ca.key") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/etcd/ca.crt") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/etcd/ca.key") + + e2e.RunInNode(node, + "etcdadm", + "join", + "https://"+e2e.NodeIPv4(firstNode)+":2379", + "--init-system", "kubelet", + "--certs-dir", "/etc/kubernetes/pki/etcd", + "--server-cert-extra-sans", strings.Join([]string{"etcd", c.VIP, e2e.NodeIPv4(node)}, ","), + "--version", "3.5.8-eks-1-26-16", + "--image-repository", "public.ecr.aws/eks-distro/etcd-io/etcd", + ) + + bindEtcdListenerToAllIPs(node) + + c.expectEtcdNodeHealthy(ctx, node, 30*time.Second) +} + +func (c *Cluster) DeleteEtcdMember(ctx context.Context, toDelete, toKeep nodes.Node) { + // point client to the node we are keeping because we are going to use it to remove the other node + // and vip is possibly pointing to that node + client := c.newEtcdClient(e2e.NodeIPv4(toDelete)) + defer client.Close() + members, err := client.MemberList(ctx) + Expect(err).NotTo(HaveOccurred()) + c.Logger.Printf("Members: %v", members.Members) + + nodeName := toDelete.String() + for _, m := range members.Members { + if m.Name == nodeName { + c.Logger.Printf("Removing node %s with memberID %d", m.Name, m.ID) + // We need to retry this request because etcd will reject it if the + // server doesn't have recent connections to enough active members + // to protect the quorum. 
(active - 1) >= 1+((members-1)/2) + Eventually(func() error { + _, err := client.MemberRemove(ctx, m.ID) + return err + }).WithPolling(time.Second).WithTimeout(10*time.Second).Should( + Succeed(), "removing member should succeed once all members have connections to each other", + ) + + break + } + } + + e2e.DeleteNodes(toDelete) +} + +func (c *Cluster) Delete() { + Expect(c.provider.Delete(c.Name, "")).To(Succeed()) +} + +func startKubeletForEtcd(node nodes.Node) { + e2e.RunInNode(node, + "kubeadm", "init", "phase", "kubeconfig", "admin", "--config", "/kind/kubeadm.conf", + ) + e2e.RunInNode(node, + "kubeadm", "init", "phase", "kubelet-start", "--config", "/kind/kubeadm.conf", + ) +} + +func createCerts(node nodes.Node) { + e2e.RunInNode(node, + "kubeadm", + "init", + "phase", "certs", "ca", "--config", "/kind/kubeadm.conf", + ) + + e2e.RunInNode(node, + "kubeadm", + "init", + "phase", "certs", "etcd-ca", "--config", "/kind/kubeadm.conf", + ) +} + +func bindEtcdListenerToAllIPs(node nodes.Node) { + // There is no easy way to make etcdadm configure etcd to bind to 0.0.0.0 + // so we just manually update the manifest after it's created and restart it + // We want to listen in 0.0.0.0 so our kube-vip can connect to it. + e2e.RunInNode(node, + "sed", "-i", `s/https:\/\/.*,https:\/\/127.0.0.1:2379/https:\/\/0.0.0.0:2379/g`, "/etc/kubernetes/manifests/etcd.manifest", + ) + + e2e.StopPodInNode(node, "etcd") + + e2e.RunInNode(node, + "systemctl", "restart", "kubelet", + ) +} + +func (c *Cluster) newEtcdClient(serverIPs ...string) *clientv3.Client { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: filepath.Join(c.EtcdCertsFolder, "ca.crt"), + CertFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.crt"), + KeyFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.key"), + } + + clientTLS, err := tlsInfo.ClientConfig() + Expect(err).NotTo(HaveOccurred()) + + endpoints := make([]string, 0, len(serverIPs)) + for _, ip := range serverIPs { + endpoints = append(endpoints, ip+":2379") + } + + client, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + TLS: clientTLS, + DialTimeout: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + return client +} + +func (c *Cluster) VerifyEtcdThroughVIP(ctx context.Context, timeout time.Duration) { + etcdClient := c.newEtcdClient(c.VIP) + defer etcdClient.Close() + rCtx, cancel := context.WithTimeout(ctx, timeout) + _, err := etcdClient.MemberList(rCtx) + Expect(err).NotTo(HaveOccurred()) + cancel() +} diff --git a/testing/e2e/etcd/election_test.go b/testing/e2e/etcd/election_test.go new file mode 100644 index 00000000..c415d383 --- /dev/null +++ b/testing/e2e/etcd/election_test.go @@ -0,0 +1,113 @@ +//go:build e2e +// +build e2e + +package etcd_test + +import ( + "context" + "os" + "path/filepath" + "text/template" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "k8s.io/klog/v2" + + "github.com/kube-vip/kube-vip/testing/e2e" + "github.com/kube-vip/kube-vip/testing/e2e/etcd" +) + +type testConfig struct { + logger e2e.TestLogger + kubeVipImage string + kubeVipManifestPath string + clusterName string + vip string + etcdCertsFolder string + currentDir string + cluster *etcd.Cluster +} + +func (t *testConfig) cleanup() { + if os.Getenv("E2E_PRESERVE_CLUSTER") == "true" { + return + } + + t.cluster.Delete() + Expect(os.RemoveAll(t.kubeVipManifestPath)).To(Succeed()) + Expect(os.RemoveAll(t.etcdCertsFolder)).To(Succeed()) +} + +var _ = Describe("kube-vip with etcd leader election", func() { + ctx := context.Background() + test := &testConfig{} + + AfterEach(func() { + test.cleanup() + }) + + BeforeEach(func() { + By("configuring test", func() { + var err error + format.UseStringerRepresentation = true // Otherwise error stacks have binary format. + klog.SetOutput(GinkgoWriter) + + test.clusterName = "kube-vip-etcd-test" // this needs to unique per it block + test.logger = e2e.TestLogger{} + test.etcdCertsFolder = "certs" + + test.kubeVipImage = os.Getenv("E2E_IMAGE_PATH") + + test.vip = e2e.GenerateIPv4VIP() + test.logger.Printf("Selected VIP %s", test.vip) + + test.currentDir, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + + tempDirPath, err := os.MkdirTemp("", "kube-vip-test") + Expect(err).NotTo(HaveOccurred()) + + test.kubeVipManifestPath = filepath.Join(tempDirPath, "etcd-vip-ipv4.yaml") + manifestFile, err := os.Create(test.kubeVipManifestPath) + Expect(err).NotTo(HaveOccurred()) + defer manifestFile.Close() + + templatePath := filepath.Join(test.currentDir, "kube-etcd-vip.yaml.tmpl") + kubeVIPManifestTemplate, err := template.New("kube-etcd-vip.yaml.tmpl").ParseFiles(templatePath) + Expect(err).NotTo(HaveOccurred()) + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ + ControlPlaneVIP: test.vip, + ImagePath: test.kubeVipImage, + })).To(Succeed()) + }) + + By("creating etcd cluster", func() { + spec := &etcd.ClusterSpec{ + Name: test.clusterName, + Nodes: 2, + VIP: test.vip, + KubeVIPImage: test.kubeVipImage, + KubeVIPpManifestPath: test.kubeVipManifestPath, + KubeletManifestPath: filepath.Join(test.currentDir, "kubelet.yaml"), + KubeletFlagsPath: filepath.Join(test.currentDir, "kubelet-flags.env"), + EtcdCertsFolder: filepath.Join(test.currentDir, test.etcdCertsFolder), + Logger: test.logger, + } + + test.cluster = etcd.CreateCluster(ctx, spec) + }) + }) + + When("an etcd node is removed", func() { + It("elects a new kube-vip leader and provides a VIP to the second node", func() { + By("removing as member and killing the first node", func() { + test.cluster.DeleteEtcdMember(ctx, test.cluster.Nodes[0], test.cluster.Nodes[1]) + }) + By("verifying etcd is up and accessible through the vip", func() { + test.cluster.VerifyEtcdThroughVIP(ctx, 40*time.Second) + }) + }) + }) +}) diff --git a/testing/e2e/etcd/etcd_suite_test.go b/testing/e2e/etcd/etcd_suite_test.go new file mode 100644 index 00000000..365a09df --- /dev/null +++ b/testing/e2e/etcd/etcd_suite_test.go @@ -0,0 +1,25 @@ +//go:build e2e +// +build e2e + +package etcd_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/kube-vip/kube-vip/testing/e2e" +) + +func TestEtcd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Etcd Suite") +} + +var _ = SynchronizedBeforeSuite( + func() { + e2e.EnsureKindNetwork() + }, + func() {}, +) diff --git a/testing/e2e/etcd/health.go b/testing/e2e/etcd/health.go new file mode 100644 index 00000000..8cbda7ea --- /dev/null +++ b/testing/e2e/etcd/health.go @@ -0,0 +1,121 @@ +//go:build e2e +// +build e2e + +package etcd + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "path/filepath" + "time" + + "github.com/kube-vip/kube-vip/testing/e2e" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "go.etcd.io/etcd/client/pkg/v3/transport" + "sigs.k8s.io/kind/pkg/cluster/nodes" +) + +func (c *Cluster) expectEtcdNodeHealthy(ctx context.Context, node nodes.Node, timeout time.Duration) { + httpClient := c.newEtcdHTTPClient() + client := c.newEtcdClient(e2e.NodeIPv4(node)) + nodeEtcdEndpoint := etcdEndpointForNode(node) + Eventually(func(g Gomega) error { + health, err := getEtcdHealth(httpClient, node) + g.Expect(err).NotTo(HaveOccurred()) + if !health.Healthy() { + c.Logger.Printf("Member %s is not healthy with reason: %s", node.String(), health.Reason) + } + g.Expect(health.Healthy()).To(BeTrue(), "member is not healthy with reason: %s", health.Reason) + statusCtx, statusCancel := context.WithTimeout(ctx, 2*time.Second) + defer statusCancel() + status, err := client.Status(statusCtx, nodeEtcdEndpoint) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(status.Errors).To(BeEmpty(), "member should not have any errors in status") + g.Expect(status.IsLearner).To(BeFalse(), "member should not be a learner") + + alarmsCtx, alarmsCancel := context.WithTimeout(ctx, 2*time.Second) + defer alarmsCancel() + alarms, err := client.AlarmList(alarmsCtx) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(alarms.Alarms).To(BeEmpty(), "cluster should not have any alarms") + + return nil + }, timeout).Should(Succeed(), "node %s should eventually be healthy", node.String()) +} + +func (c *Cluster) newEtcdHTTPClient() *http.Client { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: filepath.Join(c.EtcdCertsFolder, "ca.crt"), + CertFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.crt"), + KeyFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.key"), + } + + clientTLS, err := tlsInfo.ClientConfig() + Expect(err).NotTo(HaveOccurred()) + + return &http.Client{ + Timeout: 2 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: clientTLS, + }, + } +} + +type etcdHealthCheckResponse struct { + Health string `json:"health"` + Reason string `json:"reason"` +} + +func (h *etcdHealthCheckResponse) Healthy() bool { + return h.Health == "true" +} + +func getEtcdHealth(c *http.Client, node nodes.Node) (*etcdHealthCheckResponse, error) { + req, err := http.NewRequest("GET", etcdHealthEndpoint(node), nil) + if err != nil { + return nil, err + } + + resp, err := c.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.Wrapf(err, "etcd member not ready, returned http status %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + health, err := parseEtcdHealthResponse(body) + if err != nil { + return nil, err + } + + return health, nil +} + +func etcdEndpointForNode(node nodes.Node) string { + return e2e.NodeIPv4(node) + ":2379" +} + +func etcdHealthEndpoint(node nodes.Node) string { + 
return fmt.Sprintf("https://%s:2379/health", e2e.NodeIPv4(node)) +} + +func parseEtcdHealthResponse(data []byte) (*etcdHealthCheckResponse, error) { + obj := &etcdHealthCheckResponse{} + if err := json.Unmarshal(data, obj); err != nil { + return nil, err + } + return obj, nil +} diff --git a/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl b/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl new file mode 100644 index 00000000..a5257c91 --- /dev/null +++ b/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl @@ -0,0 +1,51 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + args: + - manager + - --leaderElectionType=etcd + - --etcdCACert=/etc/kubernetes/pki/etcd/ca.crt + - --etcdCert=/etc/kubernetes/pki/etcd/server.crt + - --etcdKey=/etc/kubernetes/pki/etcd/server.key + - --etcdEndpoints=127.0.0.1:2379 + env: + - name: vip_arp + value: "true" + - name: vip_interface + value: eth0 + - name: vip_leaderelection + value: "true" + - name: address + value: "{{ .ControlPlaneVIP }}" + - name: vip_leaseduration + value: "2" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: cp_enable + value: "true" + - name: vip_loglevel + value: "5" + image: "{{ .ImagePath }}" + imagePullPolicy: Never + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + # force kube-vip to use CP ip from admin.conf instead of localhost + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/pki/etcd + name: etcd-certs diff --git a/testing/e2e/etcd/kubelet-flags.env b/testing/e2e/etcd/kubelet-flags.env new file mode 100644 index 00000000..12833606 --- /dev/null +++ b/testing/e2e/etcd/kubelet-flags.env @@ -0,0 +1 @@ +KUBELET_EXTRA_ARGS="--kubeconfig='' --bootstrap-kubeconfig='' --container-runtime-endpoint=unix:///run/containerd/containerd.sock --node-labels= --pod-infra-container-image=registry.k8s.io/pause:3.9" \ No newline at end of file diff --git a/testing/e2e/etcd/kubelet.yaml b/testing/e2e/etcd/kubelet.yaml new file mode 100644 index 00000000..952a250d --- /dev/null +++ b/testing/e2e/etcd/kubelet.yaml @@ -0,0 +1,26 @@ +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + anonymous: + enabled: true + webhook: + enabled: false +authorization: + mode: AlwaysAllow +enableServer: false +logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + verbosity: 0 +podCIDR: 10.241.1.0/24 +staticPodPath: /etc/kubernetes/manifests +cgroupDriver: systemd +cgroupRoot: /kubelet +failSwapOn: false +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/testing/e2e/ip.go b/testing/e2e/ip.go new file mode 100644 index 00000000..df4a3d80 --- /dev/null +++ b/testing/e2e/ip.go @@ -0,0 +1,129 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "os/exec" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" +) + +func EnsureKindNetwork() { + By("checking if the Docker \"kind\" network exists") + cmd := exec.Command("docker", "inspect", "kind") + session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + Eventually(session).Should(gexec.Exit()) + if session.ExitCode() == 0 { + return + } + + By("Docker \"kind\" network was not found. Creating dummy Kind cluster to ensure creation") + clusterConfig := kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv6Family, + }, + } + + provider := cluster.NewProvider( + cluster.ProviderWithDocker(), + ) + dummyClusterName := fmt.Sprintf("dummy-cluster-%d", time.Now().Unix()) + Expect(provider.Create( + dummyClusterName, + cluster.CreateWithV1Alpha4Config(&clusterConfig), + )).To(Succeed()) + + By("deleting dummy Kind cluster") + Expect(provider.Delete(dummyClusterName, "")) + + By("checking if the Docker \"kind\" network was successfully created") + cmd = exec.Command("docker", "inspect", "kind") + session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + Eventually(session).Should(gexec.Exit(0)) +} + +func GenerateIPv6VIP() string { + cidrs := getKindNetworkSubnetCIDRs() + + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + Expect(parseErr).NotTo(HaveOccurred()) + + if ip.To4() == nil { + lowerMask := binary.BigEndian.Uint64(ipNet.Mask[8:]) + lowerStart := binary.BigEndian.Uint64(ipNet.IP[8:]) + lowerEnd := (lowerStart & lowerMask) | (^lowerMask) + + chosenVIP := make([]byte, 16) + // Copy upper half into chosenVIP + copy(chosenVIP, ipNet.IP[0:8]) + // Copy lower half into chosenVIP + binary.BigEndian.PutUint64(chosenVIP[8:], lowerEnd-5) + return net.IP(chosenVIP).String() + } + } + Fail("Could not find any IPv6 CIDRs in the Docker \"kind\" network") + return "" +} + +func GenerateIPv4VIP() string { + cidrs := getKindNetworkSubnetCIDRs() + + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + Expect(parseErr).NotTo(HaveOccurred()) + + if ip.To4() != nil { + mask := binary.BigEndian.Uint32(ipNet.Mask) + start := binary.BigEndian.Uint32(ipNet.IP) + end := (start & mask) | (^mask) + + chosenVIP := make([]byte, 4) + binary.BigEndian.PutUint32(chosenVIP, end-5) + return net.IP(chosenVIP).String() + } + } + Fail("Could not find any IPv4 CIDRs in the Docker \"kind\" network") + return "" +} + +func getKindNetworkSubnetCIDRs() []string { + cmd := exec.Command( + "docker", "inspect", "kind", + "--format", `{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}`, + ) + cmdOut := new(bytes.Buffer) + cmd.Stdout = cmdOut + Expect(cmd.Run()).To(Succeed(), "The Docker \"kind\" network was not found.") + reader := bufio.NewReader(cmdOut) + + cidrs := []string{} + for { + line, readErr := reader.ReadString('\n') + if readErr != nil && readErr != io.EOF { + Expect(readErr).NotTo(HaveOccurred(), "Error finding subnet CIDRs in the Docker \"kind\" network") + } + + cidrs = append(cidrs, strings.TrimSpace(line)) + if readErr == io.EOF { + break + } + } + + return cidrs +} diff --git a/testing/e2e/kind.go b/testing/e2e/kind.go new file mode 100644 index 00000000..59f0798d --- /dev/null +++ b/testing/e2e/kind.go @@ -0,0 +1,147 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "bufio" + "bytes" + "fmt" + "os" + 
"path/filepath" + + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/klog/v2" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/cmd" + load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image" + "sigs.k8s.io/kind/pkg/exec" + kindlog "sigs.k8s.io/kind/pkg/log" +) + +func DeleteNodes(n ...nodes.Node) { + Expect(deleteNodes(n...)).To(Succeed()) +} + +func deleteNodes(n ...nodes.Node) error { + if len(n) == 0 { + return nil + } + const command = "docker" + args := make([]string, 0, len(n)+3) // allocate once + args = append(args, + "rm", + "-f", // force the container to be delete now + "-v", // delete volumes + ) + for _, node := range n { + args = append(args, node.String()) + } + if err := exec.Command(command, args...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + return nil +} + +func NodeIPv4(node nodes.Node) string { + ip, _, err := node.IP() + Expect(err).NotTo(HaveOccurred()) + return ip +} + +func LoadDockerImageToKind(logger kindlog.Logger, imagePath, clusterName string) { + loadImageCmd := load.NewCommand(logger, cmd.StandardIOStreams()) + loadImageCmd.SetArgs([]string{"--name", clusterName, imagePath}) + Expect(loadImageCmd.Execute()).To(Succeed()) +} + +func RunInNode(node nodes.Node, command string, args ...string) { + Expect(PrintCommandOutputIfErr( + node.Command(command, args...).Run(), + )).To(Succeed()) +} + +func StopPodInNode(node nodes.Node, containerName string) { + RunInNode(node, + "bash", "-c", + fmt.Sprintf( + "crictl pods --output json --name %s-%s | jq -r \".items[0].id\" | xargs crictl stopp", + containerName, + node.String(), + ), + ) +} + +func CopyFromNodeToDisk(node nodes.Node, org, dst string) { + dstFile, err := os.Create(dst) + Expect(err).NotTo(HaveOccurred()) + defer dstFile.Close() + + Expect(node.Command("cat", org).SetStdout(dstFile).Run()).To(Succeed()) +} + +func CopyFolderFromNodeToDisk(node nodes.Node, org, dst string) { + Expect(os.MkdirAll(dst, 0o755)).To(Succeed()) + + for _, file := range filesInNodeFolder(node, org) { + CopyFromNodeToDisk(node, file, filepath.Join(dst, filepath.Base(file))) + } +} + +func CopyFolderFromNodeToNode(org, dst nodes.Node, folder string) { + for _, folder := range foldersInNodeFolder(org, folder) { + CopyFolderFromNodeToNode(org, dst, folder) + } + + for _, file := range filesInNodeFolder(org, folder) { + Expect(nodeutils.CopyNodeToNode(org, dst, file)).To(Succeed()) + } +} + +func filesInNodeFolder(node nodes.Node, folder string) []string { + return commandOutputInLines( + node, + "find", folder, "-maxdepth", "1", "-mindepth", "1", "-type", "f", + ) +} + +func foldersInNodeFolder(node nodes.Node, folder string) []string { + return commandOutputInLines( + node, + "find", folder, "-maxdepth", "1", "-mindepth", "1", "-type", "d", + ) +} + +func commandOutputInLines(node nodes.Node, command string, args ...string) []string { + var linesB bytes.Buffer + Expect(node.Command( + command, args..., + ).SetStdout(&linesB).Run()).To(Succeed()) + + var lines []string + scanner := bufio.NewScanner(&linesB) + for scanner.Scan() { + if l := scanner.Text(); l != "" { + lines = append(lines, l) + } + } + Expect(scanner.Err()).To(Succeed()) + + return lines +} + +func PrintCommandOutputIfErr(err error) error { + tErr := err + for tErr != nil { + runErrP := &exec.RunError{} + runErr := &runErrP + if errors.As(tErr, runErr) { + klog.Errorf("Command failed %s:\n%s", (*runErr).Command, string((*runErr).Output)) + break + } + } + + return 
tErr +} diff --git a/testing/e2e/kube-vip.yaml.tmpl b/testing/e2e/kube-vip.yaml.tmpl index a8c77b32..c5620914 100644 --- a/testing/e2e/kube-vip.yaml.tmpl +++ b/testing/e2e/kube-vip.yaml.tmpl @@ -8,7 +8,9 @@ spec: containers: - name: kube-vip args: - - start + - manager + - --prometheusHTTPServer + - "" env: - name: vip_arp value: "true" @@ -24,16 +26,22 @@ spec: value: "3" - name: vip_retryperiod value: "1" + - name: cp_enable + value: "true" image: "{{ .ImagePath }}" imagePullPolicy: Never securityContext: capabilities: add: - NET_ADMIN - - SYS_TIME + - NET_RAW volumeMounts: - mountPath: /etc/kubernetes/admin.conf name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 hostNetwork: true volumes: - hostPath: diff --git a/testing/e2e/logger.go b/testing/e2e/logger.go new file mode 100644 index 00000000..865ecbe8 --- /dev/null +++ b/testing/e2e/logger.go @@ -0,0 +1,43 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "k8s.io/klog/v2" + "sigs.k8s.io/kind/pkg/log" +) + +type TestLogger struct{} + +func (t TestLogger) Warnf(format string, args ...interface{}) { + klog.Warningf(format, args...) +} + +func (t TestLogger) Warn(message string) { + klog.Warning(message) +} + +func (t TestLogger) Error(message string) { + klog.Error(message) +} + +func (t TestLogger) Errorf(format string, args ...interface{}) { + klog.Errorf(format, args...) +} + +func (t TestLogger) Printf(format string, args ...interface{}) { + klog.Infof(format, args...) +} + +func (t TestLogger) V(level log.Level) log.InfoLogger { + return TestInfoLogger{Verbose: klog.V(klog.Level(level))} +} + +type TestInfoLogger struct { + klog.Verbose +} + +func (t TestInfoLogger) Info(message string) { + t.Verbose.Info(message) +} diff --git a/testing/e2e/services/controlplane.go b/testing/e2e/services/controlplane.go new file mode 100644 index 00000000..9dcca25d --- /dev/null +++ b/testing/e2e/services/controlplane.go @@ -0,0 +1,157 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "html/template" + "io" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" +) + +func getKindNetworkSubnetCIDRs() ([]string, error) { + cmd := exec.Command( + "docker", "inspect", "kind", + "--format", `{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}`, + ) + cmdOut := new(bytes.Buffer) + cmd.Stdout = cmdOut + err := cmd.Run() + if err != nil { + return nil, err + } + reader := bufio.NewReader(cmdOut) + + cidrs := []string{} + for { + line, readErr := reader.ReadString('\n') + if readErr != nil && readErr != io.EOF { + return nil, fmt.Errorf("error finding subnet CIDRs in the Docker \"kind\" network, %s", err) + } + + cidrs = append(cidrs, strings.TrimSpace(line)) + if readErr == io.EOF { + break + } + } + + return cidrs, nil +} + +func generateIPv4VIP() (string, error) { + cidrs, err := getKindNetworkSubnetCIDRs() + if err != nil { + return "", err + } + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + if err != nil { + return "", parseErr + } + if ip.To4() != nil { + mask := binary.BigEndian.Uint32(ipNet.Mask) + start := binary.BigEndian.Uint32(ipNet.IP) + end := (start & mask) | (^mask) + + chosenVIP := make([]byte, 4) + binary.BigEndian.PutUint32(chosenVIP, end-5) + return net.IP(chosenVIP).String(), nil + } + } + return "", fmt.Errorf("could not find any IPv4 CIDRs in the Docker \"kind\" network") +} + +func generateIPv6VIP() (string, error) { + cidrs, err := getKindNetworkSubnetCIDRs() + if err != nil { + 
return "", err + } + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + if err != nil { + return "", parseErr + } + if ip.To4() == nil { + lowerMask := binary.BigEndian.Uint64(ipNet.Mask[8:]) + lowerStart := binary.BigEndian.Uint64(ipNet.IP[8:]) + lowerEnd := (lowerStart & lowerMask) | (^lowerMask) + + chosenVIP := make([]byte, 16) + // Copy upper half into chosenVIP + copy(chosenVIP, ipNet.IP[0:8]) + // Copy lower half into chosenVIP + binary.BigEndian.PutUint64(chosenVIP[8:], lowerEnd-5) + return net.IP(chosenVIP).String(), nil + } + } + return "", fmt.Errorf("could not find any IPv6 CIDRs in the Docker \"kind\" network") + +} + +func (config *testConfig) manifestGen() error { + curDir, err := os.Getwd() + if err != nil { + return err + } + templatePath := filepath.Join(curDir, "testing/e2e/kube-vip.yaml.tmpl") + + kubeVIPManifestTemplate, err := template.New("kube-vip.yaml.tmpl").ParseFiles(templatePath) + if err != nil { + return err + } + tempDirPath, err := os.MkdirTemp("", "kube-vip-test") + if err != nil { + return err + } + + var manifestFile *os.File + + if config.IPv6 { + config.Name = fmt.Sprintf("%s-ipv6", filepath.Base(tempDirPath)) + config.ManifestPath = filepath.Join(tempDirPath, "kube-vip-ipv6.yaml") + manifestFile, err = os.Create(config.ManifestPath) + if err != nil { + return err + } + defer manifestFile.Close() + + config.ControlPlaneAddress, err = generateIPv6VIP() + if err != nil { + return err + } + } else { + config.Name = fmt.Sprintf("%s-ipv4", filepath.Base(tempDirPath)) + config.ManifestPath = filepath.Join(tempDirPath, "kube-vip-ipv4.yaml") + manifestFile, err = os.Create(config.ManifestPath) + if err != nil { + return err + } + defer manifestFile.Close() + + config.ControlPlaneAddress, err = generateIPv4VIP() + if err != nil { + return err + } + } + log.Infof("πŸ—ƒοΈ Manifest path %s", config.ManifestPath) + err = kubeVIPManifestTemplate.Execute(manifestFile, kubevipManifestValues{ + ControlPlaneVIP: config.ControlPlaneAddress, + ImagePath: config.ImagePath, + }) + return err +} + +// func (config *testConfig) startTest(ctx context.Context, clientset *kubernetes.Clientset) error { +// if config.ControlPlaneAddress == "" { +// log.Fatal("no control plane address exists") +// } + +// return nil +// } diff --git a/testing/e2e/services/kind-config.yaml b/testing/e2e/services/kind-config.yaml new file mode 100644 index 00000000..2eaffe06 --- /dev/null +++ b/testing/e2e/services/kind-config.yaml @@ -0,0 +1,11 @@ +# three node (two workers) cluster config +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: control-plane +- role: control-plane +- role: worker +- role: worker +- role: worker + diff --git a/testing/e2e/services/kind.go b/testing/e2e/services/kind.go new file mode 100644 index 00000000..78310349 --- /dev/null +++ b/testing/e2e/services/kind.go @@ -0,0 +1,172 @@ +package main + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "time" + + log "github.com/sirupsen/logrus" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cmd" + load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image" +) + +var provider *cluster.Provider + +type kubevipManifestValues struct { + ControlPlaneVIP string + ImagePath string +} + +type nodeAddresses struct { + node string + addresses []string +} + +func (config *testConfig) createKind() error { + + clusterConfig := kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + 
IPFamily: kindconfigv1alpha4.IPv4Family, + }, + Nodes: []kindconfigv1alpha4.Node{ + { + Role: kindconfigv1alpha4.ControlPlaneRole, + }, + }, + } + if config.IPv6 { + // Change Networking Family + clusterConfig.Networking.IPFamily = kindconfigv1alpha4.IPv6Family + } + + if config.ControlPlane { + err := config.manifestGen() + if err != nil { + return err + } + + // Add two additional control plane nodes (3) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole}) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole}) + + // Add the extra static pod manifest + mount := kindconfigv1alpha4.Mount{ + HostPath: config.ManifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + } + for x := range clusterConfig.Nodes { + if clusterConfig.Nodes[x].Role == kindconfigv1alpha4.ControlPlaneRole { + clusterConfig.Nodes[x].ExtraMounts = append(clusterConfig.Nodes[x].ExtraMounts, mount) + } + } + } else { + // Add three additional worker nodes + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + } + + provider = cluster.NewProvider(cluster.ProviderWithLogger(cmd.NewLogger()), cluster.ProviderWithDocker()) + clusters, err := provider.List() + if err != nil { + return err + } + found := false + for x := range clusters { + if clusters[x] == "services" { + log.Infof("Cluster already exists") + found = true + } + } + if !found { + err := provider.Create("services", cluster.CreateWithV1Alpha4Config(&clusterConfig)) + if err != nil { + return err + + } + loadImageCmd := load.NewCommand(cmd.NewLogger(), cmd.StandardIOStreams()) + loadImageCmd.SetArgs([]string{"--name", "services", config.ImagePath}) + err = loadImageCmd.Execute() + if err != nil { + return err + } + nodes, err := provider.ListNodes("services") + if err != nil { + return err + } + + // HMMM, if we want to run workloads on the control planes (todo) + if config.ControlPlane { + for x := range nodes { + cmd := exec.Command("kubectl", "taint", "nodes", nodes[x].String(), "node-role.kubernetes.io/control-plane:NoSchedule-") //nolint:all + _, _ = cmd.CombinedOutput() + } + } + cmd := exec.Command("kubectl", "create", "configmap", "--namespace", "kube-system", "kubevip", "--from-literal", "range-global=172.18.100.10-172.18.100.30") + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + cmd = exec.Command("kubectl", "create", "-f", "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml") + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + cmd = exec.Command("kubectl", "create", "-f", "https://kube-vip.io/manifests/rbac.yaml") + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + log.Infof("πŸ’€ sleeping for a few seconds to let controllers start") + time.Sleep(time.Second * 5) + } + return nil +} + +func deleteKind() error { + log.Info("🧽 deleting Kind cluster") + return provider.Delete("services", "") +} + +func getAddressesOnNodes() ([]nodeAddresses, error) { + nodesConfig := []nodeAddresses{} + nodes, err := provider.ListNodes("services") + if err != nil { + return nodesConfig, err + } + for x := range nodes { + var b 
bytes.Buffer + + exec := nodes[x].Command("hostname", "--all-ip-addresses") + exec.SetStderr(&b) + exec.SetStdin(&b) + exec.SetStdout(&b) + err = exec.Run() + if err != nil { + return nodesConfig, err + } + nodesConfig = append(nodesConfig, nodeAddresses{ + node: nodes[x].String(), + addresses: strings.Split(b.String(), " "), + }) + } + return nodesConfig, nil +} + +func checkNodesForDuplicateAddresses(nodes []nodeAddresses, address string) error { + var foundOnNode []string + // Iterate over all nodes to find addresses, where there is an address match add to array + for x := range nodes { + for y := range nodes[x].addresses { + if nodes[x].addresses[y] == address { + foundOnNode = append(foundOnNode, nodes[x].node) + } + } + } + // If one address is on multiple nodes, then something has gone wrong + if len(foundOnNode) > 1 { + return fmt.Errorf("‼️ multiple nodes [%s] have address [%s]", strings.Join(foundOnNode, " "), address) + } + return nil +} diff --git a/testing/e2e/services/kubernetes.go b/testing/e2e/services/kubernetes.go new file mode 100644 index 00000000..114c204c --- /dev/null +++ b/testing/e2e/services/kubernetes.go @@ -0,0 +1,285 @@ +package main + +import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + watchtools "k8s.io/client-go/tools/watch" +) + +// service defines the settings for a new service +type service struct { + name string + egress bool // enable egress + policyLocal bool // set the policy to local pods + testHTTP bool +} + +type deployment struct { + replicas int + server bool + client bool + address string + nodeAffinity string + name string +} + +func (d *deployment) createKVDs(ctx context.Context, clientset *kubernetes.Clientset, imagepath string) error { + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-vip-ds", + Namespace: "kube-system", + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Spec: v1.PodSpec{ + ServiceAccountName: "kube-vip", + HostNetwork: true, + Containers: []v1.Container{ + { + Args: []string{ + "manager", + }, + Env: []v1.EnvVar{ + { + Name: "vip_arp", + Value: "true", + }, + { + Name: "vip_cidr", + Value: "32", + }, + { + Name: "svc_enable", + Value: "true", + }, + { + Name: "svc_election", + Value: "true", + }, + { + Name: "EGRESS_CLEAN", + Value: "true", + }, + { + Name: "vip_loglevel", + Value: "5", + }, + { + Name: "egress_withnftables", + Value: "true", + }, + }, + Image: imagepath, + Name: "kube-vip", + SecurityContext: &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ + Add: []v1.Capability{ + "NET_ADMIN", + "NET_RAW", + }, + }, + }, + }, + }, + }, + }, + }, + } + + _, err := clientset.AppsV1().DaemonSets("kube-system").Create(ctx, &ds, metav1.CreateOptions{}) + if err != nil { + return err + } + + return nil + +} +func (d *deployment) createDeployment(ctx context.Context, clientset *kubernetes.Clientset) error { + replicas := int32(d.replicas) + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.name, + }, + Spec: 
appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "kube-vip", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "kube-vip", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-vip-web", + Image: "plndr/e2e:0.0.1", + Ports: []v1.ContainerPort{ + { + Name: "http", + Protocol: v1.ProtocolTCP, + ContainerPort: 80, + }, + }, + ImagePullPolicy: v1.PullAlways, + }, + }, + }, + }, + }, + } + + if d.server { + deployment.Spec.Template.Spec.Containers[0].Env = + []v1.EnvVar{ + { + Name: "E2EMODE", + Value: "SERVER", + }, + } + } + + if d.client && d.address != "" { + deployment.Spec.Template.Spec.Containers[0].Env = + []v1.EnvVar{ + { + Name: "E2EMODE", + Value: "CLIENT", + }, + { + Name: "E2EADDRESS", + Value: d.address, + }, + } + } + + if d.nodeAffinity != "" { + deployment.Spec.Template.Spec.NodeName = d.nodeAffinity + } + + result, err := clientset.AppsV1().Deployments(v1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{}) + if err != nil { + return err + } + + log.Infof("πŸ“ created deployment [%s]", result.GetObjectMeta().GetName()) + return nil +} + +func (s *service) createService(ctx context.Context, clientset *kubernetes.Clientset) (currentLeader string, loadBalancerAddress string, err error) { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.name, + Namespace: "default", + Labels: map[string]string{ + "app": "kube-vip", + }, + }, + + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Port: 80, + Protocol: v1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app": "kube-vip", + }, + ClusterIP: "", + Type: v1.ServiceTypeLoadBalancer, + }, + } + + if s.egress { + svc.Annotations = map[string]string{ //kube-vip.io/egress: "true" + "kube-vip.io/egress": "true", + } + } + if s.policyLocal { + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal + } + + log.Infof("🌍 creating service [%s]", svc.Name) + _, err = clientset.CoreV1().Services(v1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) + if err != nil { + log.Fatal(err) + } + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + log.Fatal(err) + } + ch := rw.ResultChan() + go func() { + time.Sleep(time.Second * 10) + rw.Stop() + }() + ready := false + + // Used for tracking an active endpoint / pod + for event := range ch { + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == s.name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ”Ž found load balancer address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + ready = true + loadBalancerAddress = svc.Status.LoadBalancer.Ingress[0].IP + currentLeader = svc.Annotations["kube-vip.io/vipHost"] + } + } + default: + + } + if ready { + break + } + } + if s.testHTTP { + err = httpTest(loadBalancerAddress) 
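+		// When testHTTP is set, confirm the allocated load-balancer address actually answers HTTP before returning.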
+ if err != nil { + return "", "", fmt.Errorf("web retrieval timeout ") + + } + } + return currentLeader, loadBalancerAddress, nil +} diff --git a/testing/e2e/services/services.go b/testing/e2e/services/services.go new file mode 100644 index 00000000..7bb0df99 --- /dev/null +++ b/testing/e2e/services/services.go @@ -0,0 +1,555 @@ +//nolint:govet +package main + +import ( + "context" + "fmt" + "net" + "net/http" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + watchtools "k8s.io/client-go/tools/watch" +) + +// Methodology + +// 1. Create a deployment +// 2. Expose the deployment +func (config *testConfig) startServiceTest(ctx context.Context, clientset *kubernetes.Clientset) { + nodeTolerate := os.Getenv("NODE_TOLERATE") + + d := "kube-vip-deploy" + s := "kube-vip-service" + l := "kube-vip-deploy-leader" + + if !config.ignoreSimple { + // Simple Deployment test + log.Infof("πŸ§ͺ ---> simple deployment <---") + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + testHTTP: true, + } + _, _, err = svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreDeployments { + // Multiple deployment tests + log.Infof("πŸ§ͺ ---> multiple deployments <---") + deploy := deployment{ + name: l, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + if err != nil { + log.Fatal(err) + } + for i := 1; i < 5; i++ { + svc := service{ + name: fmt.Sprintf("%s-%d", s, i), + testHTTP: true, + } + _, _, err = svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + config.successCounter++ + } + for i := 1; i < 5; i++ { + log.Infof("🧹 deleting service [%s]", fmt.Sprintf("%s-%d", s, i)) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, fmt.Sprintf("%s-%d", s, i), metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + log.Infof("🧹 deleting deployment [%s]", d) + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, l, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreLeaderFailover { + // Failover tests + log.Infof("πŸ§ͺ ---> leader failover deployment (local policy) <---") + + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + egress: false, + policyLocal: true, + testHTTP: true, + } + leader, lbAddress, err := svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } + + err = leaderFailover(ctx, &s, &leader, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + // Get all addresses on all nodes + nodes, err := getAddressesOnNodes() + if err != nil { + 
log.Error(err) + } + // Make sure we don't exist in two places + err = checkNodesForDuplicateAddresses(nodes, lbAddress) + if err != nil { + log.Fatal(err) + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + + if !config.ignoreLeaderActive { + // pod Failover tests + log.Infof("πŸ§ͺ ---> active pod failover deployment (local policy) <---") + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 1, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + policyLocal: true, + testHTTP: true, + } + leader, _, err := svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } + + err = podFailover(ctx, &s, &leader, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreLocalDeploy { + // Multiple deployment tests + log.Infof("πŸ§ͺ ---> multiple deployments (local policy) <---") + deploy := deployment{ + name: l, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + for i := 1; i < 5; i++ { + svc := service{ + policyLocal: true, + name: fmt.Sprintf("%s-%d", s, i), + testHTTP: true, + } + _, lbAddress, err := svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + config.successCounter++ + nodes, err := getAddressesOnNodes() + if err != nil { + log.Error(err) + } + err = checkNodesForDuplicateAddresses(nodes, lbAddress) + if err != nil { + log.Fatal(err) + } + } + for i := 1; i < 5; i++ { + log.Infof("🧹 deleting service [%s]", fmt.Sprintf("%s-%d", s, i)) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, fmt.Sprintf("%s-%d", s, i), metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + log.Infof("🧹 deleting deployment [%s]", d) + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, l, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + + if !config.ignoreEgress { + // pod Failover tests + log.Infof("πŸ§ͺ ---> egress IP re-write (local policy) <---") + var egress string + var found bool + // Set up a local listener + go func() { + found = tcpServer(&egress) + }() + + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 1, + client: true, + } + + // Find this machines IP address + deploy.address = GetLocalIP() + if deploy.address == "" { + log.Fatalf("Unable to detect local IP address") + } + log.Infof("πŸ“  found local address [%s]", deploy.address) + // Create a deployment that connects back to this machines IP address + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + + svc := service{ + policyLocal: true, + name: s, + egress: true, + testHTTP: false, + } + + _, egress, err = svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + + for i := 1; 
i < 5; i++ {
+			if found {
+				log.Infof("πŸ•΅οΈ egress has correct IP address")
+				config.successCounter++
+				break
+			}
+			time.Sleep(time.Second * 1)
+		}
+
+		if !found {
+			log.Error("😱 No traffic found from load balancer address")
+		}
+		log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d)
+		err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{})
+		if err != nil {
+			log.Fatal(err)
+		}
+		err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{})
+		if err != nil {
+			log.Fatal(err)
+		}
+		log.Infof("πŸ† Testing Complete [%d] passed", config.successCounter)
+	}
+}
+
+func httpTest(address string) error {
+	Client := http.Client{
+		Timeout: 1 * time.Second,
+	}
+	var err error
+	var r *http.Response
+	for i := 0; i < 5; i++ {
+		// assign to the outer err so a persistent failure is actually returned to the caller
+		r, err = Client.Get(fmt.Sprintf("http://%s", address)) //nolint
+
+		if err == nil {
+			log.Infof("πŸ•ΈοΈ successfully retrieved web data on attempt [%d]", i+1)
+			r.Body.Close()
+
+			return nil
+		}
+		time.Sleep(time.Second)
+	}
+	return err
+}
+
+func leaderFailover(ctx context.Context, name, leaderNode *string, clientset *kubernetes.Clientset) error {
+	go func() {
+		log.Infof("πŸ’€ killing leader five times")
+		for i := 0; i < 5; i++ {
+			p, err := clientset.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
+			if err != nil {
+				log.Fatal(err)
+			}
+
+			for x := range p.Items {
+				if p.Items[x].Spec.NodeName == *leaderNode {
+					if p.Items[x].Spec.Containers[0].Name == "kube-vip" {
+						err = clientset.CoreV1().Pods("kube-system").Delete(ctx, p.Items[x].Name, metav1.DeleteOptions{})
+						if err != nil {
+							log.Fatal(err)
+						}
+						log.Infof("πŸ”ͺ leader pod [%s] has been deleted", p.Items[x].Name)
+					}
+				}
+			}
+			time.Sleep(time.Second * 5)
+		}
+	}()
+
+	log.Infof("πŸ‘€ service [%s] for updates", *name)
+
+	// Use a restartable watcher, as this should help in the event of etcd or timeout issues
+	rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{
+		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+			return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{})
+		},
+	})
+	if err != nil {
+		return err
+	}
+	ch := rw.ResultChan()
+
+	go func() {
+		time.Sleep(time.Second * 30)
+		rw.Stop()
+	}()
+
+	// Used for tracking an active endpoint / pod
+	for event := range ch {
+		// We need to inspect the event and get ResourceVersion out of it
+		switch event.Type {
+		case watch.Added:
+			// log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName)
+			svc, ok := event.Object.(*v1.Service)
+			if !ok {
+				log.Fatalf("unable to parse Kubernetes services from API watcher")
+			}
+			if svc.Name == *name {
+				if len(svc.Status.LoadBalancer.Ingress) != 0 {
+					log.Infof("πŸ”Ž found load balancer address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"])
+				}
+			}
+		case watch.Modified:
+			svc, ok := event.Object.(*v1.Service)
+			if !ok {
+				log.Fatalf("unable to parse Kubernetes services from API watcher")
+			}
+			if svc.Name == *name {
+				if len(svc.Status.LoadBalancer.Ingress) != 0 {
+					log.Infof("πŸ” updated with address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"])
+					err = httpTest(svc.Status.LoadBalancer.Ingress[0].IP)
+					if err != nil {
+						return err
+					}
+					*leaderNode = svc.Annotations["kube-vip.io/vipHost"]
+				}
+			}
+		default:
+
+		}
+	}
+	return nil
+}
+
+func podFailover(ctx context.Context, name, leaderNode *string, clientset *kubernetes.Clientset) error {
+	go 
func() { + log.Infof("πŸ’€ killing active pod five times") + for i := 0; i < 5; i++ { + p, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{}) + if err != nil { + log.Fatal(err) + } + found := false + for x := range p.Items { + if p.Items[x].Spec.NodeName == *leaderNode { + if p.Items[x].Spec.Containers[0].Name == "kube-vip-web" { + found = true + err = clientset.CoreV1().Pods(v1.NamespaceDefault).Delete(ctx, p.Items[x].Name, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + log.Infof("πŸ”ͺ active pod [%s] on [%s] has been deleted", p.Items[x].Name, p.Items[x].Spec.NodeName) + } + } + } + if !found { + log.Warnf("😱 No Pod found on [%s]", *leaderNode) + } + time.Sleep(time.Second * 5) + } + }() + + log.Infof("πŸ‘€ service [%s] for updates", *name) + + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + return err + } + ch := rw.ResultChan() + + go func() { + time.Sleep(time.Second * 30) + rw.Stop() + }() + + // Used for tracking an active endpoint / pod + for event := range ch { + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ”Ž found load balancer address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + } + } + case watch.Modified: + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ” updated with address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + err = httpTest(svc.Status.LoadBalancer.Ingress[0].IP) + if err != nil { + log.Fatal(err) + } + *leaderNode = svc.Annotations["kube-vip.io/vipHost"] + } + } + default: + + } + } + return nil +} + +func tcpServer(egressAddress *string) bool { + listen, err := net.Listen("tcp", ":12345") //nolint + if err != nil { + log.Error(err) + } + // close listener + go func() { + time.Sleep(time.Second * 10) + listen.Close() + }() + for { + conn, err := listen.Accept() + if err != nil { + return false + // log.Fatal(err) + } + remoteAddress, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) + if remoteAddress == *egressAddress { + log.Infof("πŸ“ž πŸ‘ incoming from egress Address [%s]", remoteAddress) + return true + } + log.Infof("πŸ“ž πŸ‘Ž incoming from pod address [%s]", remoteAddress) + go handleRequest(conn) + } +} + +func handleRequest(conn net.Conn) { + // incoming request + buffer := make([]byte, 1024) + _, err := conn.Read(buffer) + if err != nil { + log.Error(err) + } + // write data to response + time := time.Now().Format(time.ANSIC) + responseStr := fmt.Sprintf("Your message is: %v. 
Received time: %v", string(buffer[:]), time) + _, err = conn.Write([]byte(responseStr)) + if err != nil { + log.Error(err) + } + // close conn + conn.Close() +} + +func GetLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + for _, address := range addrs { + // check the address type and if it is not a loopback the display it + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/testing/e2e/services/tests.go b/testing/e2e/services/tests.go new file mode 100644 index 00000000..a5400fa9 --- /dev/null +++ b/testing/e2e/services/tests.go @@ -0,0 +1,123 @@ +package main + +import ( + "context" + "flag" + "os" + "path/filepath" + + "github.com/kube-vip/kube-vip/pkg/k8s" + log "github.com/sirupsen/logrus" +) + +type testConfig struct { + successCounter int + + ImagePath string + + ControlPlane bool + // control plane settings + Name string + ControlPlaneAddress string + ManifestPath string + IPv6 bool + + Services bool + // service tests + ignoreSimple bool + ignoreDeployments bool + ignoreLeaderFailover bool + ignoreLeaderActive bool + ignoreLocalDeploy bool + ignoreEgress bool + retainCluster bool +} + +func main() { + var t testConfig + + t.ImagePath = os.Getenv("E2E_IMAGE_PATH") + + _, t.ignoreSimple = os.LookupEnv("IGNORE_SIMPLE") + _, t.ignoreDeployments = os.LookupEnv("IGNORE_DEPLOY") + _, t.ignoreLeaderFailover = os.LookupEnv("IGNORE_LEADER") + _, t.ignoreLeaderActive = os.LookupEnv("IGNORE_ACTIVE") + _, t.ignoreLocalDeploy = os.LookupEnv("IGNORE_LOCALDEPLOY") + _, t.ignoreEgress = os.LookupEnv("IGNORE_EGRESS") + _, t.retainCluster = os.LookupEnv("RETAIN_CLUSTER") + + flag.StringVar(&t.ImagePath, "imagepath", "plndr/kube-vip:action", "") + flag.BoolVar(&t.ControlPlane, "ControlPlane", false, "") + flag.BoolVar(&t.Services, "Services", false, "") + + flag.Parse() + + log.Infof("πŸ”¬ beginning e2e tests, image: [%s]", t.ImagePath) + + if t.ControlPlane { + err := t.createKind() + if !t.retainCluster { + if err != nil { + log.Fatal(err) + } + defer func() { + err := deleteKind() + if err != nil { + log.Fatal(err) + } + }() + } else { + if err != nil { + log.Warn(err) + } + } + // ctx, cancel := context.WithCancel(context.TODO()) + // defer cancel() + // homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") + // clientset, err := k8s.NewClientset(homeConfigPath, false, "") + // if err != nil { + // log.Fatalf("could not create k8s clientset from external file: %q: %v", homeConfigPath, err) + // } + // log.Debugf("Using external Kubernetes configuration from file [%s]", homeConfigPath) + // err = t.startTest(ctx, clientset) + // if err != nil { + // log.Fatal(err) + // } + } + + if t.Services { + err := t.createKind() + if !t.retainCluster { + if err != nil { + log.Fatal(err) + } + defer func() { + err := deleteKind() + if err != nil { + log.Fatal(err) + } + }() + } else { + if err != nil { + log.Warn(err) + } + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") + clientset, err := k8s.NewClientset(homeConfigPath, false, "") + if err != nil { + log.Fatalf("could not create k8s clientset from external file: %q: %v", homeConfigPath, err) + } + log.Debugf("Using external Kubernetes configuration from file [%s]", homeConfigPath) + + // Deplopy the daemonset for kube-vip + deploy := deployment{} + err = deploy.createKVDs(ctx, clientset, 
t.ImagePath)
+		if err != nil {
+			log.Error(err)
+		}
+		t.startServiceTest(ctx, clientset)
+	}
+
+}
diff --git a/testing/e2e/template.go b/testing/e2e/template.go
new file mode 100644
index 00000000..43fc9582
--- /dev/null
+++ b/testing/e2e/template.go
@@ -0,0 +1,9 @@
+//go:build e2e
+// +build e2e
+
+package e2e
+
+type KubevipManifestValues struct {
+	ControlPlaneVIP string
+	ImagePath       string
+}
diff --git a/testing/k3s/create.sh b/testing/k3s/create.sh
index dcdac2f7..b9ebf0c8 100755
--- a/testing/k3s/create.sh
+++ b/testing/k3s/create.sh
@@ -1,4 +1,20 @@
 #!/bin/bash
+
+set -e
+
+# Read node configuration
+source ./testing/nodes
+
+# Read logging function
+source ./testing/logging.bash
+
+## Main()
+
+# Ensure we have an entirely new logfile
+reset_logfile
+
+logr "INFO" "Starting kube-vip.io testing with k3s"
+logr "DEFAULT" "Creating Logfile $logfile"
 
 if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then
     echo "Usage:"
@@ -10,34 +26,34 @@ if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then
    exit 1
 fi
 
-case "$2" in
+# Sane variable renaming
+kubernetes_version=$4
+kube_vip_version=$1
+kube_vip_vip=$3
 
-"controlplane") echo "Creating control plane only cluster"
-	mode="--controlplane"
+case "$2" in
+"controlplane") logr "INFO" "Creating in control plane only mode"
+	kube_vip_mode="--controlplane"
 	;;
-"services") echo "Creating services only cluster"
-	mode="--services"
+"services") logr "INFO" "Creating in services-only mode"
+	kube_vip_mode="--services"
 	;;
-"hybrid") echo "Creating hybrid cluster"
-	mode="--controlplane --services"
+"hybrid") logr "INFO" "Creating in hybrid mode"
+	kube_vip_mode="--controlplane --services"
 	;;
-*) echo "Unknown kube-vip mode [$2]"
+*) echo "Unknown kube-vip mode [$2]"
 	exit -1
 	;;
 esac
 
-source ./testing/nodes
-
-echo "Creating First node!" 
- ssh $NODE01 "sudo mkdir -p /var/lib/rancher/k3s/server/manifests/" -ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$1 manifest daemonset $mode --interface ens160 --vip $3 --arp --leaderElection --inCluster --taint | sudo tee /var/lib/rancher/k3s/server/manifests/vip.yaml" +ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$kube_vip_version manifest daemonset $kube_vip_mode --interface ens160 --vip $kube_vip_vip --arp --leaderElection --inCluster --taint | sudo tee /var/lib/rancher/k3s/server/manifests/vip.yaml" ssh $NODE01 "sudo curl https://kube-vip.io/manifests/rbac.yaml | sudo tee /var/lib/rancher/k3s/server/manifests/rbac.yaml" -ssh $NODE01 "sudo screen -dmSL k3s k3s server --cluster-init --tls-san $3 --no-deploy servicelb --disable-cloud-controller --token=test" +ssh $NODE01 "sudo screen -dmSL k3s k3s server --cluster-init --tls-san $kube_vip_vip --no-deploy servicelb --disable-cloud-controller --token=test" echo "Started first node, sleeping for 60 seconds" sleep 60 echo "Adding additional nodes" -ssh $NODE02 "sudo screen -dmSL k3s k3s server --server https://$3:6443 --token=test" -ssh $NODE03 "sudo screen -dmSL k3s k3s server --server https://$3:6443 --token=test" +ssh $NODE02 "sudo screen -dmSL k3s k3s server --server https://$kube_vip_vip:6443 --token=test" +ssh $NODE03 "sudo screen -dmSL k3s k3s server --server https://$kube_vip_vip:6443 --token=test" sleep 20 ssh $NODE01 "sudo k3s kubectl get node -o wide" diff --git a/testing/kubeadm/create.sh b/testing/kubeadm/create.sh index 79ebc9ee..6e346386 100755 --- a/testing/kubeadm/create.sh +++ b/testing/kubeadm/create.sh @@ -1,4 +1,67 @@ #!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + +install_deps() { + echo "Installing Kubernetes dependencies for Kubernetes $kubernetes_version on all nodes" + ssh $NODE01 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE02 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE03 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE04 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE05 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades 
kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" +} + +first_node() { + logr "INFO" "Creating First node!" + #ssh $NODE01 "sudo modprobe ip_vs_rr" + #ssh $NODE01 "sudo modprobe nf_conntrack" + logr "INFO" "$(ssh $NODE01 "docker rmi ghcr.io/kube-vip/kube-vip:$kube_vip_version" 2>&1)" + + # echo "echo "ip_vs | tee -a /etc/modules" + logr "INFO" "Creating Kube-vip.io Manifest" + ssh $NODE01 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\" | sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Deploying first Kubernetes node $NODE01" + FIRST_NODE=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16") + echo "$FIRST_NODE" >> $logfile + CONTROLPLANE_CMD=$(echo "$FIRST_NODE" | grep -m1 certificate-key) + #CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16 | grep -m1 certificate-key") + ssh $NODE01 "sudo rm -rf ~/.kube/" + ssh $NODE01 "mkdir -p .kube" + ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config" + ssh $NODE01 "sudo chown dan:dan .kube/config" + logr "INFO" "Enabling strict ARP on kube-proxy" + ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system" + ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP" + logr "INFO" "Deploying Calico to the Kubernetes Cluster" + ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml" >> $logfile + logr "INFO" "Retrieving Join command" + JOIN_CMD=$(ssh $NODE01 " sudo kubeadm token create --print-join-command 2> /dev/null") +} + + +additional_controlplane() { + logr "INFO" "Adding $NODE02" + ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE02 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Adding $NODE03" + ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE03 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile +} + +## Main() + +# Ensure we have an entirely new logfile +reset_logfile + +logr "INFO" "Starting kube-vip.io testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then echo "Usage:" @@ -7,61 +70,45 @@ if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then echo " Param 3: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]" echo " Param 4: Vip address" echo "" - echo "" ./create_k8s.sh 1.18.5 0.3.3 192.168.0.40 + echo "" ./create.sh 1.18.5 0.4.0 hybrid 192.168.0.40 exit 1 fi -case "$3" in +# Sane variable renaming +kubernetes_version=$1 
+kube_vip_version=$2 +kube_vip_vip=$4 -"controlplane") echo "Sending SIGHUP signal" - mode="--controlplane" +case "$3" in +"controlplane") logr "INFO" "Creating in control plane only mode" + kube_vip_mode="--controlplane" ;; -"services") echo "Sending SIGINT signal" - mode="--services" +"services") logr "INFO" "Creating in services-only mode" + kube_vip_mode="--services" ;; -"hybrid") echo "Sending SIGQUIT signal" - mode="--controlplane --services" +"hybrid") logr "INFO" "Creating in hybrid mode" + kube_vip_mode="--controlplane --services" ;; *) echo "Unknown kube-vip mode [$3]" exit -1 ;; esac -source ./testing/nodes - -echo "Installing Kubernetes dependencies for Kubernetes $! on all nodes" -ssh $NODE01 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy kubelet=$1-00 kubectl=$1-00 kubeadm=$1-00" -ssh $NODE02 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy kubelet=$1-00 kubectl=$1-00 kubeadm=$1-00" -ssh $NODE03 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy kubelet=$1-00 kubectl=$1-00 kubeadm=$1-00" -ssh $NODE04 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy kubelet=$1-00 kubectl=$1-00 kubeadm=$1-00" -ssh $NODE05 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy kubelet=$1-00 kubectl=$1-00 kubeadm=$1-00" - -echo "Creating First node!" 
-ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod $mode --interface ens160 --vip $4 --arp --leaderElection | sudo tee /etc/kubernetes/manifests/vip.yaml" -CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $1 --control-plane-endpoint $4 --upload-certs --pod-network-cidr=10.0.0.0/16 | grep certificate-key") -ssh $NODE01 "sudo rm -rf ~/.kube/" -ssh $NODE01 "mkdir -p .kube" -ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config" -ssh $NODE01 "sudo chown dan:dan .kube/config" -echo "Enabling strict ARP on kube-proxy" -ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system" -ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP" -ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml" -JOIN_CMD=$(ssh $NODE01 " sudo kubeadm token create --print-join-command 2> /dev/null") - -ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD" -sleep 3 -ssh $NODE02 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod --interface ens160 --vip $4 --arp --leaderElection $mode | sudo tee /etc/kubernetes/manifests/vip.yaml" +if [[ -z "$DEPS" ]]; then + logr "INFO" "Installing specific version of Kubernetes Dependencies" + install_deps +fi -ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD" -sleep 3 -ssh $NODE03 "sudo docker run --network host --rm plndr/kube-vip:$2 manifest pod --interface ens160 --vip $4 --arp --leaderElection $mode | sudo tee /etc/kubernetes/manifests/vip.yaml" -ssh $NODE04 "sudo $JOIN_CMD" -ssh $NODE05 "sudo $JOIN_CMD" +first_node +additional_controlplane +logr "INFO" "Adding $NODE04" +ssh $NODE04 "sudo $JOIN_CMD" >> $logfile +logr "INFO" "Adding $NODE05" +ssh $NODE05 "sudo $JOIN_CMD" >> $logfile +logr "DEFAULT" "Nodes should be deployed at this point, waiting 5 secs and querying the deployment" echo -echo " Nodes should be deployed at this point, waiting 5 secs and querying the deployment" sleep 5 -ssh $NODE01 "kubectl get nodes" -ssh $NODE01 "kubectl get pods -A" +ssh $NODE01 "kubectl get nodes" | tee >> $logfile +ssh $NODE01 "kubectl get pods -A" | tee >> $logfile echo -echo "Kubernetes: $1, Kube-vip $2, Advertising VIP: $4" +logr "INFO" "Kubernetes: $kubernetes_version, Kube-vip $kube_vip_version, Advertising VIP: $kube_vip_vip" diff --git a/testing/kubeadm/create_ctr.sh b/testing/kubeadm/create_ctr.sh new file mode 100755 index 00000000..b366321a --- /dev/null +++ b/testing/kubeadm/create_ctr.sh @@ -0,0 +1,116 @@ +#!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + +install_deps() { + echo "Installing Kubernetes dependencies for Kubernetes $kubernetes_version on all nodes" + ssh $NODE01 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE02 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 
kubeadm=$kubernetes_version-00" + ssh $NODE03 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE04 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE05 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" +} + +first_node() { + logr "INFO" "Creating First node!" + #ssh $NODE01 "sudo modprobe ip_vs_rr" + #ssh $NODE01 "sudo modprobe nf_conntrack" + logr "INFO" "$(ssh $NODE01 "ctr images rm ghcr.io/kube-vip/kube-vip:$kube_vip_version" 2>&1)" + + # echo "echo "ip_vs | tee -a /etc/modules" + logr "INFO" "Creating Kube-vip.io Manifest" + + ssh $NODE01 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version" + ssh $NODE01 "sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\" | sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Deploying first Kubernetes node $NODE01" + FIRST_NODE=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16") + echo "$FIRST_NODE" >> $logfile + CONTROLPLANE_CMD=$(echo "$FIRST_NODE" | grep -m1 certificate-key) + #CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16 | grep -m1 certificate-key") + ssh $NODE01 "sudo rm -rf ~/.kube/" + ssh $NODE01 "mkdir -p .kube" + ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config" + ssh $NODE01 "sudo chown dan:dan .kube/config" + logr "INFO" "Enabling strict ARP on kube-proxy" + ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system" + ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP" + logr "INFO" "Deploying Calico to the Kubernetes Cluster" + ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml" >> $logfile + logr "INFO" "Retrieving Join command" + JOIN_CMD=$(ssh $NODE01 "kubeadm token create --print-join-command 2> /dev/null") +} + + +additional_controlplane() { + logr "INFO" "Adding $NODE02" + ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE02 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version; sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host 
ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Adding $NODE03" + ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE03 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version; sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile +} + +## Main() + +# Ensure we have an entirely new logfile +reset_logfile + +logr "INFO" "Starting kube-vip.io testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" + +if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then + echo "Usage:" + echo " Param 1: Kubernetes Version" + echo " Param 2: Kube-Vip Version" + echo " Param 3: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]" + echo " Param 4: Vip address" + echo "" + echo "" ./create.sh 1.18.5 0.4.0 hybrid 192.168.0.40 + exit 1 +fi + +# Sane variable renaming +kubernetes_version=$1 +kube_vip_version=$2 +kube_vip_vip=$4 + +case "$3" in +"controlplane") logr "INFO" "Creating in control plane only mode" + kube_vip_mode="--controlplane" + ;; +"services") logr "INFO" "Creating in services-only mode" + kube_vip_mode="--services" + ;; +"hybrid") logr "INFO" "Creating in hybrid mode" + kube_vip_mode="--controlplane --services" + ;; +*) echo "Unknown kube-vip mode [$3]" + exit -1 + ;; +esac + +if [[ -z "$DEPS" ]]; then + logr "INFO" "Installing specific version of Kubernetes Dependencies" + install_deps +fi + +first_node +additional_controlplane +logr "INFO" "Adding $NODE04" +ssh $NODE04 "sudo $JOIN_CMD" >> $logfile +logr "INFO" "Adding $NODE05" +ssh $NODE05 "sudo $JOIN_CMD" >> $logfile +logr "DEFAULT" "Nodes should be deployed at this point, waiting 5 secs and querying the deployment" +echo +sleep 5 +ssh $NODE01 "kubectl get nodes" | tee >> $logfile +ssh $NODE01 "kubectl get pods -A" | tee >> $logfile +echo +logr "INFO" "Kubernetes: $kubernetes_version, Kube-vip $kube_vip_version, Advertising VIP: $kube_vip_vip" diff --git a/testing/kubeadm/service.sh b/testing/kubeadm/service.sh new file mode 100755 index 00000000..8ac810a0 --- /dev/null +++ b/testing/kubeadm/service.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + + +logr "INFO" "Starting kube-vip.io service testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" + +# Adding Controller +logr "INFO" "Creating network range configmap" +ssh $NODE01 "kubectl create configmap -n kube-system kubevip --from-literal range-global=192.168.0.220-192.168.0.222" >> $logfile + +logr "INFO" "Deploying kube-vip.io Controller" +ssh $NODE01 "kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml" >> $logfile + +logr "INFO" "Creating \"nginx\" deployment" +ssh $NODE01 "kubectl apply -f https://k8s.io/examples/application/deployment.yaml" >> $logfile +sleep 5 + +logr "DEFAULT" "Creating \"nginx\" service" +ssh $NODE01 "kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer 
--name=nginx" >> $logfile + +logr "INFO" "Sleeping for 20 seconds to give the controller time to \"reconcile\"" +sleep 20 + +logr "INFO" "Retrieving logs from kube-vip.io cloud provider" +ssh $NODE01 "kubectl logs -n kube-system kube-vip-cloud-provider-0" >> $logfile +logr "INFO" "Retrieving service configuration" +ssh $NODE01 "kubectl describe svc nginx" | tee >> $logfile diff --git a/testing/logging.bash b/testing/logging.bash new file mode 100644 index 00000000..fb81b6a4 --- /dev/null +++ b/testing/logging.bash @@ -0,0 +1,46 @@ +LOG_ON_FILE=true + +logfile="/tmp/kube-vip-testing.$(date +'%Y-%m-%d').log" + + +echo_timing() { + #-------------------------------------------------------- + # Out: [19/01/2020 18h19:56] Hello + #-------------------------------------------------------- + echo [`date +%d"/"%m"/"%Y" "%H"h"%M":"%S`] $@ +} + +echo_color(){ + COLOR=$1; MSG=$2; + + if [[ ${COLOR} == *"WHITE"* ]]; then echo -e "\\e[39m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"RED"* ]]; then echo -e "\\e[31m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"GREEN"* ]]; then echo -e "\\e[32m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"YELLOW"* ]]; then echo -e "\\e[33m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"BLUE"* ]]; then echo -e "\\e[34m"${MSG}"\\e[0m"; + fi; +} + +echo_console(){ + TYPE_OF_MSG=$1; MSG=$2; + + if [[ ${TYPE_OF_MSG} == *"1"* ]] || [[ ${TYPE_OF_MSG} == *"SUCCESS"* ]]; then echo_timing "$(echo_color "GREEN" "[+]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"2"* ]] || [[ ${TYPE_OF_MSG} == *"FAIL"* ]]; then echo_timing "$(echo_color "RED" "[-]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"3"* ]] || [[ ${TYPE_OF_MSG} == *"WARNING"* ]]; then echo_timing "$(echo_color "YELLOW" "[!]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"4"* ]] || [[ ${TYPE_OF_MSG} == *"INFO"* ]]; then echo_timing "$(echo_color "BLUE" "[i]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"0"* ]] || [[ ${TYPE_OF_MSG} == *"DEFAULT"* ]]; then echo_timing "$(echo_color "WHITE" "[:]: ${MSG}")"; + else MSG=${TYPE_OF_MSG}; echo_timing "$(echo_color "WHITE" "[:]: ${MSG}")"; + fi; +} + +logr(){ + TYPE_OF_MSG=$1; MSG=$2; + + if [[ ${LOG_ON_FILE} ]]; then echo_console "${TYPE_OF_MSG}" "${MSG}" | tee -a "${logfile}"; + else echo_console "${TYPE_OF_MSG}" "${MSG}"; fi; +} + +reset_logfile() { + touch $logfile + cat /dev/null > $logfile +} \ No newline at end of file diff --git a/testing/nodes b/testing/nodes index f624c23c..3ecc09ec 100644 --- a/testing/nodes +++ b/testing/nodes @@ -1,6 +1,11 @@ # Home lab (for testing) -NODE01=k8s01.fnnrn.me -NODE02=k8s02.fnnrn.me -NODE03=k8s03.fnnrn.me -NODE04=k8s04.fnnrn.me -NODE05=k8s05.fnnrn.me \ No newline at end of file +#NODE01=k8s01.fnnrn.me +#NODE02=k8s02.fnnrn.me +#NODE03=k8s03.fnnrn.me +#NODE04=k8s04.fnnrn.me +#NODE05=k8s05.fnnrn.me +NODE01=192.168.0.191 +NODE02=192.168.0.192 +NODE03=192.168.0.193 +NODE04=192.168.0.194 +NODE05=192.168.0.195
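
For reference, a rough sketch of how the new service end-to-end suite might be driven once this lands, based on the flags and environment toggles defined in testing/e2e/services/tests.go. The package path, the presence of kind/docker on the host, and the kind helpers (createKind/deleteKind) living elsewhere in the same package are assumptions, and the image tag is only a placeholder:

    # assumes a reachable ~/.kube/config and kind + docker on the host
    export IGNORE_EGRESS=true     # any IGNORE_* variable can be set (to any value) to skip that test block
    export RETAIN_CLUSTER=true    # keep the kind cluster around afterwards for debugging
    export NODE_TOLERATE=worker-1 # optionally pin the test deployments to a specific node name
    go run ./testing/e2e/services -Services -imagepath plndr/kube-vip:action

IGNORE_SIMPLE, IGNORE_DEPLOY, IGNORE_LEADER, IGNORE_ACTIVE, IGNORE_LOCALDEPLOY and IGNORE_EGRESS each drop the corresponding test block when set; the kube-vip image under test is passed with -imagepath.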
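
Similarly, the reworked kubeadm scripts keep the positional interface from their usage text (Kubernetes version, kube-vip version, mode, VIP) and source ./testing/nodes and ./testing/logging.bash by relative path, so they are assumed to be run from the repository root. The version numbers and VIP below are just the example values from the scripts' own usage output:

    # run from the repository root so ./testing/nodes and ./testing/logging.bash resolve
    ./testing/kubeadm/create.sh 1.18.5 0.4.0 hybrid 192.168.0.40
    # containerd hosts use the ctr variant; setting DEPS (to any value) skips the kubelet/kubeadm/kubectl install step
    DEPS=1 ./testing/kubeadm/create_ctr.sh 1.18.5 0.4.0 hybrid 192.168.0.40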