diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..972401f6 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,4 @@ +# Enable GitHub funding + +github: [kube-vip] + diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000..2e9dd2aa --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,35 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Screenshots** +If applicable, add screenshots to help explain your problem. + +**Environment (please complete the following information):** + - OS/Distro: [e.g. Ubuntu 1804] + - Kubernetes Version: [e.g. v.1.18] + - Kube-vip Version: [e.g. 0.2.3] + +**`Kube-vip.yaml`:** +If Possible add in your kube-vip manifest (please remove anything that is confidential) + +**Additional context** +Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..bbcbbe7d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: '' +assignees: '' + +--- + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..c25ee7a4 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,14 @@ +version: 2 +updates: + - package-ecosystem: github-actions + directory: / + schedule: + interval: weekly + - package-ecosystem: gomod + directory: / + schedule: + interval: weekly + - package-ecosystem: docker + directory: / + schedule: + interval: weekly diff --git a/.github/workflows/anchore-syft.yml b/.github/workflows/anchore-syft.yml new file mode 100644 index 00000000..ad8e884f --- /dev/null +++ b/.github/workflows/anchore-syft.yml @@ -0,0 +1,31 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# This workflow checks out code, builds an image, performs a container image +# scan with Anchore's Syft tool, and uploads the results to the GitHub Dependency +# submission API. + +# For more information on the Anchore sbom-action usage +# and parameters, see https://github.com/anchore/sbom-action. 
For more +# information about the Anchore SBOM tool, Syft, see +# https://github.com/anchore/syft +name: Anchore Syft SBOM scan + +on: + release: + types: [published] + +jobs: + sbom: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + with: + ref: ${{ github.ref_name }} + - name: Anchore SBOM Action + uses: anchore/sbom-action@v0.15.8 + with: + format: cyclonedx-json diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..b139e663 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,97 @@ +name: For each commit and PR +on: + push: + pull_request: +env: + GO_VERSION: "1.21" +jobs: + validation: + runs-on: ubuntu-latest + name: Checks and linters + steps: + - name: Init + run: sudo apt-get update && sudo apt-get install -y build-essential golint && sudo sysctl fs.inotify.max_user_instances=8192 && sudo sysctl fs.inotify.max_user_watches=524288 + - name: Install golangci-lint + run: curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.2 + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: All checks + run: make check + unit-tests: + runs-on: ubuntu-latest + name: Unit tests + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Run tests + run: make unit-tests + integration-tests: + name: Integration tests + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Run tests + run: make integration-tests + e2e-tests: + runs-on: ubuntu-latest + name: E2E ARP tests + steps: + - name: Ensure fs wont cause issues + run: sudo sysctl fs.inotify.max_user_instances=8192 && sudo sysctl fs.inotify.max_user_watches=524288 + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Build image locally + run: make dockerx86Local + - name: Run Control plane tests + run: make e2e-tests + - name: Run Control plane tests v1.29.0 onwards + run: make e2e-tests129 + service-e2e-tests: + runs-on: ubuntu-latest + name: E2E service tests + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Build image with iptables + run: make dockerx86ActionIPTables + - name: Run tests + run: DOCKERTAG=action make service-tests + image-vul-check: + runs-on: ubuntu-latest + name: Image vulnerability scan + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Build image with iptables + run: make dockerx86ActionIPTables + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@master + with: + image-ref: 'plndr/kube-vip:action' + format: 'table' + exit-code: '1' + ignore-unfixed: true + vuln-type: 'os,library' + severity: 'CRITICAL,HIGH' + diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000..dca0ef6d --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,75 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. 
+# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL" + +on: + push: + branches: [ main ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ main ] + schedule: + - cron: '17 10 * * 6' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ 'go' ] + # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python', 'ruby' ] + # Learn more about CodeQL language support at https://git.io/codeql-language-support + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + - name: Autobuild + uses: github/codeql-action/autobuild@v3 + + # ℹ️ Command-line programs to run using the OS shell. + # πŸ“š https://git.io/JvXDl + + # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + + #- run: | + # make bootstrap + # make release + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 diff --git a/.github/workflows/main.yaml b/.github/workflows/main.yaml new file mode 100644 index 00000000..e6558d22 --- /dev/null +++ b/.github/workflows/main.yaml @@ -0,0 +1,52 @@ +name: Build and publish main image regularly + +on: + schedule: + - cron: '25 0 * * *' + workflow_dispatch: + + +jobs: + nightly_build: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to Github Packages + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build standard version + id: docker_build + uses: docker/build-push-action@v5 + with: + context: . + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip:${{ github.ref_name }} + - name: Build iptables version + id: docker_build_iptables + uses: docker/build-push-action@v5 + with: + context: . 
+ file: Dockerfile_iptables + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip-iptables:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip-iptables:${{ github.ref_name }} + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 00000000..6a6c0e46 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,56 @@ +name: Publish Releases to Docker Hub and GitHub Container Registry + +on: + push: + tags: + - '*' + workflow_dispatch: + +jobs: + docker: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Login to Github Packages + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push main branch + id: docker_build + uses: docker/build-push-action@v5 + with: + context: . + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip:${{ github.ref_name }}, + plndr/kube-vip:latest, + ghcr.io/kube-vip/kube-vip:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip:latest + - name: Build iptables version and push main branch + id: docker_build_iptables + uses: docker/build-push-action@v5 + with: + context: . + file: Dockerfile_iptables + platforms: linux/amd64,linux/arm/v7,linux/arm64,linux/ppc64le,linux/s390x + push: ${{ github.event_name != 'pull_request' }} + tags: >- + plndr/kube-vip-iptables:${{ github.ref_name }}, + plndr/kube-vip-iptables:latest, + ghcr.io/kube-vip/kube-vip-iptables:${{ github.ref_name }}, + ghcr.io/kube-vip/kube-vip-iptables:latest + - name: Image digest + run: echo ${{ steps.docker_build.outputs.digest }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..4d94032f --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +.idea +kube-vip +.vscode +bin +testing/e2e/etcd/certs +pkg/etcd/etcd.pid +pkg/etcd/etcd-data diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 00000000..bda104b7 --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,13 @@ +run: + timeout: 10m + +linters: + enable: + - bodyclose + - gofmt + - goimports + - revive + - gosec + - misspell + - unconvert + - unparam diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 00000000..ae4b473a --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,10 @@ + +# For more information on the syntax of the CODEOWNERS file, see: +# https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners + +# The Kube-Vip maintainers team +* @thebsdbox @yastij + +# Emeritus Maintainers +# +# N/A diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..fe8c1ee4 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,128 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and 
expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, religion, or sexual identity +and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the + overall community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or + advances of any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email + address, without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +conduct@kube-vip.io. +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series +of actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. 
Violating these terms may lead to a temporary or +permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within +the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.0, available at +https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct +enforcement ladder](https://github.com/mozilla/diversity). + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see the FAQ at +https://www.contributor-covenant.org/faq. Translations are available at +https://www.contributor-covenant.org/translations. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..de6d2b63 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,289 @@ +# Developer Guide + +Thank you for taking the time out to contribute to the **kube-vip** project! + +This guide will walk you through the process of making your first commit and how +to effectively get it merged upstream. + + +- [Getting Started](#getting-started) +- [Contribute](#contribute) + - [GitHub Workflow](#github-workflow) + - [Getting reviewers](#getting-reviewers) + - [Inclusive Naming](#inclusive-naming) + - [Building and testing your change](#building-and-testing-your-change) + - [Reverting a commit](#reverting-a-commit) + - [Sign-off Your Work](#sign-off-your-work) +- [Issue and PR Management](#issue-and-pr-management) + - [Filing An Issue](#filing-an-issue) + - [Issue Triage](#issue-triage) + + +## Getting Started + +To get started, let's ensure you have completed the following prerequisites for +contributing to project **kube-vip**: + +1. Read and observe the [code of conduct](CODE_OF_CONDUCT.md). +2. Check out the [Architecture documentation](https://kube-vip.io) for the **kube-vip** + architecture and design. +3. Set up your [development environment](docs/contributors/manual-installation.md) + +Now that you're setup, skip ahead to learn how to [contribute](#contribute). + +**Also**, A GitHub account will be required in order to submit code changes and +interact with the project. For committing any changes in **Github** it is required for +you to have a [github account](https://github.com/join). + +## Contribute + +There are multiple ways in which you can contribute, either by contributing +code in the form of new features or bug-fixes or non-code contributions like +helping with code reviews, triaging of bugs, documentation updates, filing +[new issues](#filing-an-issue) or writing blogs/manuals etc. 
+ +### GitHub Workflow + +Developers work in their own forked copy of the repository and when ready, +submit pull requests to have their changes considered and merged into the +project's repository. + +1. Fork your own copy of the repository to your GitHub account by clicking on + `Fork` button on [kube-vip's GitHub repository](https://github.com/kube-vip/kube-vip). +2. Clone the forked repository on your local setup. + + ```bash + git clone https://github.com/$user/kube-vip + ``` + + Add a remote upstream to track upstream kube-vip repository. + + ```bash + git remote add upstream https://github.com/kube-vip/kube-vip + ``` + + Never push to upstream remote + + ```bash + git remote set-url --push upstream no_push + ``` + +3. Create a topic branch. + + ```bash + git checkout -b branchName + ``` + +4. Make changes and commit it locally. Make sure that your commit is + [signed](#sign-off-your-work). + + ```bash + git add + git commit -s + ``` + +5. Update the "Unreleased" section of the [CHANGELOG](CHANGELOG.md) for any + significant change that impacts users. +6. Keeping branch in sync with upstream. + + ```bash + git checkout branchName + git fetch upstream + git rebase upstream/main + ``` + +7. Push local branch to your forked repository. + + ```bash + git push -f $remoteBranchName branchName + ``` + +8. Create a Pull request on GitHub. + Visit your fork at `https://github.com/kube-vip/kube-vip` and click + `Compare & Pull Request` button next to your `remoteBranchName` branch. + +### Getting reviewers + +Once you have opened a Pull Request (PR), reviewers will be assigned to your +PR and they may provide review comments which you need to address. +Commit changes made in response to review comments to the same branch on your +fork. Once a PR is ready to merge, squash any *fix review feedback, typo* +and *merged* sorts of commits. + +To make it easier for reviewers to review your PR, consider the following: + +1. Follow the golang [coding conventions](https://github.com/golang/go/wiki/CodeReviewComments). +2. Format your code with `make golangci-fix`; if the [linters](ci/README.md) flag an issue that + cannot be fixed automatically, an error message will be displayed so you can address the issue. +3. Follow [git commit](https://chris.beams.io/posts/git-commit/) guidelines. +4. Follow [logging](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md) guidelines. + +If your PR fixes a bug or implements a new feature, add the appropriate test +cases to our [automated test suite](ci/README.md) to guarantee enough +coverage. A PR that makes significant code changes without contributing new test +cases will be flagged by reviewers and will not be accepted. + +### Inclusive Naming + +For symbol names and documentation, do not introduce new usage of harmful +language such as 'master / slave' (or 'slave' independent of 'master') and +'blacklist / whitelist'. For more information about what constitutes harmful +language and for a reference word replacement list, please refer to the +[Inclusive Naming Initiative](https://inclusivenaming.org/). + +We are committed to removing all harmful language from the project. If you +detect existing usage of harmful language in code or documentation, please +report the issue to us or open a Pull Request to address it directly. Thanks! + +### Building and testing your change + +To build the **kube-vip** Docker image together with all **kube-vip** bits, you can simply +do: + +1. 
Check out your feature branch and `cd` into it. +2. Run `make dockerx86` + +The second step will compile the **kube-vip** code in a `golang` container, and build +a minimal (`scratch`-based) Docker image that includes the generated binary. [`Docker`](https://docs.docker.com/install) +must be installed on your local machine in advance. + +Alternatively, you can build the **kube-vip** code in your local Go environment. The +**kube-vip** project uses the [Go modules support](https://github.com/golang/go/wiki/Modules) which was introduced in Go 1.11. It +facilitates dependency tracking and no longer requires projects to live inside +the `$GOPATH`. + +To develop locally, you can follow these steps: + + 1. [Install Go 1.21](https://golang.org/doc/install) (or the version referenced in `go.mod`) + 2. Check out your feature branch and `cd` into it. + 3. To build the `kube-vip` binary, run `make build` + 4. To run all Go unit tests, run `make unit-tests` + 5. To build a local **kube-vip** Docker image with the compiled binary, run `make dockerx86Local` + +### CI testing + +For more information about the tests we run as part of CI, please refer to +[ci/README.md](ci/README.md). + +### Reverting a commit + +1. Create a branch in your forked repo + + ```bash + git checkout -b revertName + ``` + +2. Sync the branch with upstream + + ```bash + git fetch upstream + git rebase upstream/main + ``` + +3. Create a revert based on the SHA of the commit. The commit needs to be + [signed](#sign-off-your-work). + + ```bash + git revert -s SHA + ``` + +4. Push this new commit. + + ```bash + git push $remoteRevertName revertName + ``` + +5. Create a Pull Request on GitHub. + Visit your fork at `https://github.com/$user/kube-vip` and click the + `Compare & Pull Request` button next to your `remoteRevertName` branch. + +### Sign-off Your Work + +It is recommended to sign your work when contributing to the **kube-vip** +repository. + +Git provides the `-s` command-line option to append the required line +automatically to the commit message: + +```bash +git commit -s -m 'This is my commit message' +``` + +For an existing commit, you can also use this option with `--amend`: + +```bash +git commit -s --amend +``` + +If more than one person works on something, it's possible for more than one +person to sign off on it. For example: + +```bash +Signed-off-by: Some Developer somedev@example.com +Signed-off-by: Another Developer anotherdev@example.com +``` + +We use the [DCO Github App](https://github.com/apps/dco) to enforce that all +commits in a Pull Request include the required `Signed-off-by` line. If this is +not the case, the app will report a failed status for the Pull Request and it +will be blocked from being merged. + +Compared to our earlier CLA, DCO tends to make the experience simpler for new +contributors. If you are contributing as an employee, there is no need for your +employer to sign anything; the DCO assumes you are authorized to submit +contributions (it's your responsibility to check with your employer). + +## Issue and PR Management + +We use labels and workflows (some manual, some automated with GitHub Actions) to +help us manage triage, prioritize, and track issue progress. For a detailed +discussion, see [docs/issue-management.md](docs/contributors/issue-management.md). + +### Filing An Issue + +Help is always appreciated. If you find something that needs fixing, please file +an issue [here](https://github.com/kube-vip/kube-vip/issues).
Please ensure +that the issue is self explanatory and has enough information for an assignee to +get started. + +Before picking up a task, go through the existing +[issues](https://github.com/kube-vip/kube-vip/issues) and make sure that your +change is not already being worked on. If it does not exist, please create a new +issue and discuss it with other members. + +For simple contributions to **kube-vip**, please ensure that this minimum set of +labels are included on your issue: + +* **kind** -- common ones are `kind/feature`, `kind/support`, `kind/bug`, + `kind/documentation`, or `kind/design`. For an overview of the different types + of issues that can be submitted, see [Issue and PR + Kinds](#issue-and-pr-kinds). + The kind of issue will determine the issue workflow. +* **area** (optional) -- if you know the area the issue belongs in, you can assign it. + Otherwise, another community member will label the issue during triage. The + area label will identify the area of interest an issue or PR belongs in and + will ensure the appropriate reviewers shepherd the issue or PR through to its + closure. For an overview of areas, see the + [`docs/github-labels.md`](docs/contributors/github-labels.md). +* **size** (optional) -- if you have an idea of the size (lines of code, complexity, + effort) of the issue, you can label it using a [size label](#size). The size + can be updated during backlog grooming by contributors. This estimate is used + to guide the number of features selected for a milestone. + +All other labels will be assigned during issue triage. + +### Issue Triage + +Once an issue has been submitted, the CI (GitHub actions) or a human will +automatically review the submitted issue or PR to ensure that it has all relevant +information. If information is lacking or there is another problem with the +submitted issue, an appropriate `triage/` label will be applied. + +After an issue has been triaged, the maintainers can prioritize the issue with +an appropriate `priority/` label. + +Once an issue has been submitted, categorized, triaged, and prioritized it +is marked as `ready-to-work`. A ready-to-work issue should have labels +indicating assigned areas, prioritization, and should not have any remaining +triage labels. + diff --git a/Dockerfile b/Dockerfile index 4126ef08..5ff57485 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:experimental -FROM golang:1.15-alpine as dev +FROM golang:1.21.6-alpine3.18 as dev RUN apk add --no-cache git ca-certificates make RUN adduser -D appuser COPY . /src/ @@ -12,5 +12,8 @@ RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ CGO_ENABLED=0 GOOS=linux make build FROM scratch +# Add Certificates into the image, for anything that does API calls +COPY --from=dev /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt +# Add kube-vip binary COPY --from=dev /src/kube-vip / -ENTRYPOINT ["/kube-vip"] \ No newline at end of file +ENTRYPOINT ["/kube-vip"] diff --git a/Dockerfile_iptables b/Dockerfile_iptables new file mode 100644 index 00000000..9f44c871 --- /dev/null +++ b/Dockerfile_iptables @@ -0,0 +1,21 @@ +# syntax=docker/dockerfile:experimental + +FROM golang:1.21.6-alpine3.18 as dev +RUN apk add --no-cache git make +RUN adduser -D appuser +COPY . 
/src/ +WORKDIR /src + +ENV GO111MODULE=on +RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ + --mount=type=cache,sharing=locked,id=goroot,target=/root/.cache/go-build \ + CGO_ENABLED=0 GOOS=linux make build + +FROM alpine:3.19.1 +# Update pkgs and add iptables +RUN apk upgrade && \ + apk add --no-cache iptables + +# Add kube-vip binary +COPY --from=dev /src/kube-vip / +ENTRYPOINT ["/kube-vip"] diff --git a/Makefile b/Makefile index 79cda47f..96e3a868 100644 --- a/Makefile +++ b/Makefile @@ -1,12 +1,12 @@ - SHELL := /bin/sh # The name of the executable (default is current directory name) TARGET := kube-vip -.DEFAULT_GOAL: $(TARGET) +.DEFAULT_GOAL := $(TARGET) # These will be provided to the target -VERSION := 0.2.0 +VERSION := v0.7.0 + BUILD := `git rev-parse HEAD` # Operating System Default (LINUX) @@ -14,18 +14,14 @@ TARGETOS=linux # Use linker flags to provide version/build settings to the target LDFLAGS=-ldflags "-s -w -X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -extldflags -static" - -# go source files, ignore vendor directory -SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") - DOCKERTAG ?= $(VERSION) -REPOSITORY = plndr +REPOSITORY ?= plndr -.PHONY: all build clean install uninstall fmt simplify check run +.PHONY: all build clean install uninstall fmt simplify check run e2e-tests all: check install -$(TARGET): $(SRC) +$(TARGET): @go build $(LDFLAGS) -o $(TARGET) build: $(TARGET) @@ -42,43 +38,106 @@ uninstall: clean @rm -f $$(which ${TARGET}) fmt: - @gofmt -l -w $(SRC) + @gofmt -l -w ./... demo: @cd demo - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New Multi Architecture Docker image created @cd .. ## Remote (push of images) - # This build a local docker image (x86 only) for quick testing + +dockerx86Dev: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 --push -t $(REPOSITORY)/$(TARGET):dev . + @echo New single x86 Architecture Docker image created + +dockerx86Iptables: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 -f ./Dockerfile_iptables --push -t $(REPOSITORY)/$(TARGET):dev . + @echo New single x86 Architecture Docker image created + dockerx86: + @-rm ./kube-vip @docker buildx build --platform linux/amd64 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New single x86 Architecture Docker image created docker: - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @-rm ./kube-vip + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New Multi Architecture Docker image created ## Local (docker load of images) - # This will build a local docker image (x86 only), use make dockerLocal for all architectures dockerx86Local: + @-rm ./kube-vip @docker buildx build --platform linux/amd64 --load -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New Multi Architecture Docker image created +dockerx86Action: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 --load -t $(REPOSITORY)/$(TARGET):action . + @echo New Multi Architecture Docker image created + +dockerx86ActionIPTables: + @-rm ./kube-vip + @docker buildx build --platform linux/amd64 -f ./Dockerfile_iptables --load -t $(REPOSITORY)/$(TARGET):action . 
+ @echo New Multi Architecture Docker image created + dockerLocal: - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --load -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @-rm ./kube-vip + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --load -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . @echo New Multi Architecture Docker image created simplify: - @gofmt -s -l -w $(SRC) + @gofmt -s -l -w ./... check: - @test -z $(shell gofmt -l main.go | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'" - @for d in $$(go list ./... | grep -v /vendor/); do golint $${d}; done - @go tool vet ${SRC} + go mod tidy + test -z "$(git status --porcelain)" + test -z $(shell gofmt -l main.go | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'" + golangci-lint run + go vet ./... run: install @$(TARGET) + +manifests: + @make build + @mkdir -p ./docs/manifests/$(VERSION)/ + @./kube-vip manifest pod --interface eth0 --vip 192.168.0.1 --arp --leaderElection --controlplane --services > ./docs/manifests/$(VERSION)/kube-vip-arp.yaml + @./kube-vip manifest pod --interface eth0 --vip 192.168.0.1 --arp --leaderElection --controlplane --services --enableLoadBalancer > ./docs/manifests/$(VERSION)/kube-vip-arp-lb.yaml + @./kube-vip manifest pod --interface eth0 --vip 192.168.0.1 --bgp --controlplane --services > ./docs/manifests/$(VERSION)/kube-vip-bgp.yaml + @./kube-vip manifest daemonset --interface eth0 --vip 192.168.0.1 --arp --leaderElection --controlplane --services --inCluster > ./docs/manifests/$(VERSION)/kube-vip-arp-ds.yaml + @./kube-vip manifest daemonset --interface eth0 --vip 192.168.0.1 --arp --leaderElection --controlplane --services --inCluster --enableLoadBalancer > ./docs/manifests/$(VERSION)/kube-vip-arp-ds-lb.yaml + @./kube-vip manifest daemonset --interface eth0 --vip 192.168.0.1 --bgp --leaderElection --controlplane --services --inCluster > ./docs/manifests/$(VERSION)/kube-vip-bgp-ds.yaml + @./kube-vip manifest daemonset --interface eth0 --vip 192.168.0.1 --bgp --leaderElection --controlplane --services --inCluster --provider-config /etc/cloud-sa/cloud-sa.json > ./docs/manifests/$(VERSION)/kube-vip-bgp-em-ds.yaml + @-rm ./kube-vip + +unit-tests: + go test ./... 
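For orientation, the build and test targets in this Makefile are the same entry points the CI workflow (`.github/workflows/ci.yaml`) drives; a local run might look like the sketch below (assuming Docker Buildx, Go and kind are available locally, and using the Makefile's default repository and tag):

```bash
# Build a local x86-64 image (loaded into the local Docker daemon, not pushed)
make dockerx86Local

# Run the Go unit tests, then the kind-based e2e suite against that image
make unit-tests
make e2e-tests

# The service tests expect the iptables-enabled image tagged :action, as in CI
make dockerx86ActionIPTables
DOCKERTAG=action make service-tests
```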
+ +integration-tests: + go test -tags=integration,e2e -v ./pkg/etcd + +e2e-tests: + E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run github.com/onsi/ginkgo/v2/ginkgo --tags=e2e -v -p ./testing/e2e ./testing/e2e/etcd + +e2e-tests129: + V129=true K8S_IMAGE_PATH=kindest/node:v1.29.0 E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run github.com/onsi/ginkgo/v2/ginkgo --tags=e2e -v -p ./testing/e2e + +service-tests: + E2E_IMAGE_PATH=$(REPOSITORY)/$(TARGET):$(DOCKERTAG) go run ./testing/e2e/services -Services + +trivy: dockerx86ActionIPTables + docker run -v /var/run/docker.sock:/var/run/docker.sock aquasec/trivy:0.47.0 \ + image \ + --format table \ + --exit-code 1 \ + --ignore-unfixed \ + --vuln-type 'os,library' \ + --severity 'CRITICAL,HIGH' \ + $(REPOSITORY)/$(TARGET):action + diff --git a/README.md b/README.md index 92cf886b..9aec8ed4 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,38 @@ # kube-vip +High Availability and Load-Balancing -**NOTE** All documentation of both usage and architecture are now available at [https://kube-vip.io](https://kube-vip.io) +![](https://github.com/kube-vip/kube-vip/raw/main/kube-vip.png) +[![Build and publish main image regularly](https://github.com/kube-vip/kube-vip/actions/workflows/main.yaml/badge.svg)](https://github.com/kube-vip/kube-vip/actions/workflows/main.yaml) ## Overview -Kubernetes Virtual IP and Load-Balancer for both control pane and Kubernetes services +Kubernetes Virtual IP and Load-Balancer for both control plane and Kubernetes services The idea behind `kube-vip` is a small self-contained Highly-Available option for all environments, especially: - Bare-Metal - Edge (arm / Raspberry PI) +- Virtualisation - Pretty much anywhere else :) -![](/overview.png) +**NOTE:** All documentation of both usage and architecture are now available at [https://kube-vip.io](https://kube-vip.io). -The `kube-vip` application builds a multi-node or multi-pod cluster to provide High-Availability. When a leader is elected, this node will inherit the Virtual IP and become the leader of the load-balancing within the cluster. +## Features -When running **out of cluster** it will use [raft](https://en.wikipedia.org/wiki/Raft_(computer_science)) clustering technology +Kube-Vip was originally created to provide a HA solution for the Kubernetes control plane, over time it has evolved to incorporate that same functionality into Kubernetes service type [load-balancers](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). -When running **in cluster** it will use [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection) +- VIP addresses can be both IPv4 or IPv6 +- Control Plane with ARP (Layer 2) or BGP (Layer 3) +- Control Plane using either [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection) or [raft](https://en.wikipedia.org/wiki/Raft_(computer_science)) +- Control Plane HA with kubeadm (static Pods) +- Control Plane HA with K3s/and others (daemonsets) +- Service LoadBalancer using [leader election](https://godoc.org/k8s.io/client-go/tools/leaderelection) for ARP (Layer 2) +- Service LoadBalancer using multiple nodes with BGP +- Service LoadBalancer address pools per namespace or global +- Service LoadBalancer address via (existing network DHCP) +- Service LoadBalancer address exposure to gateway via UPNP +- ... manifest generation, vendor API integrations and many more... ## Why? 
@@ -31,53 +44,27 @@ The purpose of `kube-vip` is to simplify the building of HA Kubernetes clusters, **VIP**: - [Keepalived](https://www.keepalived.org/) -- [Ucarp](https://ucarp.wordpress.com/) +- [UCARP](https://ucarp.wordpress.com/) - Hardware Load-balancer (functionality differs per vendor) **LoadBalancing**: - [HAProxy](http://www.haproxy.org/) - [Nginx](http://nginx.com) -- Hardware Load-balancer(functionality differs per vendor) +- Hardware Load-balancer (functionality differs per vendor) All of these would require a separate level of configuration and in some infrastructures multiple teams in order to implement. Also when considering the software components, they may require packaging into containers or if they’re pre-packaged then security and transparency may be an issue. Finally, in edge environments we may have limited room for hardware (no HW load-balancer) or packages solutions in the correct architectures might not exist (e.g. ARM). Luckily with `kube-vip` being written in GO, it’s small(ish) and easy to build for multiple architectures, with the added security benefit of being the only thing needed in the container. +## Troubleshooting and Feedback -## Standalone Usage - -The usage of `kube-vip` can either be directly by taking the binary / building yourself (`make build`), or alternatively through a pre-built docker container which can be found in the plunder Docker Hub repository [https://hub.docker.com/r/plndr/kube-vip](https://hub.docker.com/r/plndr/kube-vip). For further - -### Configuration - -To generate the basic `yaml` configuration: - -``` -kube-vip sample config > config.yaml -``` - -Modify the `localPeer` section to match this particular instance (local IP address/port etc..) and ensure that the `remotePeers` section is correct for the current instance and all other instances in the cluster. Also ensure that the `interface` is the correct interface that the `vip` will bind to. +Please raise issues on the GitHub repository and as mentioned check the documentation at [https://kube-vip.io](https://kube-vip.io/). +## Contributing -## Starting a simple cluster +Thanks for taking the time to join our community and start contributing! We welcome pull requests. Feel free to dig through the [issues](https://github.com/kube-vip/kube-vip/issues) and jump in. 
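As a quick orientation alongside the documentation links above, the manifest-generation flow exercised by the Makefile's `manifests` target can also be run by hand. The sketch below uses the same flags as that target; `eth0`, `192.168.0.1` and the output location are placeholder values (for a kubeadm control plane the generated file is typically dropped into the static Pod directory):

```bash
# Build the kube-vip binary (the Makefile's `manifests` target does the same first)
make build

# Generate a static Pod manifest for an ARP-based control-plane VIP
./kube-vip manifest pod \
  --interface eth0 \
  --vip 192.168.0.1 \
  --arp \
  --leaderElection \
  --controlplane \
  --services > kube-vip.yaml
```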
-To start `kube-vip` ensure the configuration for the `localPeers` and `remotePeers` is correct for each instance and the cluster as a whole and start: +:warning: This project has issues compiling on macOS; please compile it on a Linux distribution. -``` -kube-vip start -c /config.yaml -INFO[0000] Reading configuration from [config.yaml] -INFO[0000] 2020-02-01T15:41:04.287Z [INFO] raft: initial configuration: index=1 servers="[{Suffrage:Voter ID:server1 Address:192.168.0.70:10000} {Suffrage:Voter ID:server2 Address:192.168.0.71:10000} {Suffrage:Voter ID:server3 Address:192.168.0.72:10000}]" -INFO[0000] 2020-02-01T15:41:04.287Z [INFO] raft: entering follower state: follower="Node at 192.168.0.70:10000 [Follower]" leader= -INFO[0000] Started -INFO[0000] The Node [] is leading -INFO[0001] The Node [] is leading -INFO[0001] 2020-02-01T15:41:05.522Z [WARN] raft: heartbeat timeout reached, starting election: last-leader= -INFO[0001] 2020-02-01T15:41:05.522Z [INFO] raft: entering candidate state: node="Node at 192.168.0.70:10000 [Candidate]" term=2 -INFO[0001] 2020-02-01T15:41:05.522Z [DEBUG] raft: votes: needed=2 -INFO[0001] 2020-02-01T15:41:05.522Z [DEBUG] raft: vote granted: from=server1 term=2 tally=1 -INFO[0001] 2020-02-01T15:41:05.523Z [DEBUG] raft: newer term discovered, fallback to follower -INFO[0001] 2020-02-01T15:41:05.523Z [INFO] raft: entering follower state: follower="Node at 192.168.0.70:10000 [Follower]" leader= -INFO[0001] 2020-02-01T15:41:05.838Z [WARN] raft: failed to get previous log: previous-index=2 last-index=1 error="log not found" -INFO[0002] The Node [192.168.0.72:10000] is leading -``` +## Star History -After a few seconds with additional nodes started a leader election will take place and the leader will assume the **vip**. +[![Star History Chart](https://api.star-history.com/svg?repos=kube-vip/kube-vip&type=Date)](https://star-history.com/#kube-vip/kube-vip&Date) diff --git a/ROADMAP.md b/ROADMAP.md new file mode 100644 index 00000000..7f934019 --- /dev/null +++ b/ROADMAP.md @@ -0,0 +1,35 @@ +# Kube-Vip Roadmap + +This document outlines the roadmap for the **kube-vip** project and only covers the technologies within this particular project; other projects that augment or provide additional functionality (such as cloud-providers) may have their own roadmaps in the future. The functionality for **kube-vip** has grown either organically or in response to real-world needs, and this is the first attempt to put a plan for the future of **kube-vip** into words; it will evolve over time. This means that items listed or detailed here are not necessarily set in stone and the roadmap can grow/shrink as the project matures. We definitely welcome suggestions and ideas from everyone about the roadmap and **kube-vip** features. Reach us through Issues, Slack or email @kube-vip.io. + +## Release methodology + +The **kube-vip** project attempts to follow a tick-tock release cycle; this typically means that one release will come **packed** with new features, while the following release will come with fixes, code sanitation and performance enhancements. + +## Roadmap + +The **kube-vip** project offers two main areas of functionality: + +- HA Kubernetes clusters through a control-plane VIP +- Kubernetes `service type:LoadBalancer` + +Whilst both of these functions share underlying technologies and code, they will have slightly differing roadmaps. + +### HA Kubernetes Control Plane + +- **Re-implement LoadBalancing** - due to a previous request, the HTTP load-balancing was removed, leaving just HA for the control plane. This functionality will be re-implemented either through the original round-robin HTTP requests or by utilising IPVS. +- **Utilise the Kubernetes API to determine additional Control Plane members** - Once a single-node cluster is running, **kube-vip** could use the API to determine the additional members; at this time a Cluster-API provider needs to drop a static manifest per control-plane node. +- **Re-evaluate raft** - **kube-vip** is mainly designed to run within a Kubernetes cluster; however, its original design was a raft cluster external to Kubernetes. Unfortunately, given some of the upgrade paths identified in projects such as CAPV, moving to leader election within Kubernetes became the better approach. + +## Kubernetes `service type:LoadBalancer` + +- **`ARP` LeaderElection per loadBalancer** - Currently only the one pod that is elected leader will field all traffic for a VIP; extending this to generate a leaderElection token per service would allow services to be distributed across all pods in the cluster. +- **Aligning of `service` and `manager`** - The move to allow hybrid mode (being both the HA control plane and offering load-balancer services at the same time) introduced a duplicate code path; these need to converge, as the current split is confusing for contributors. + +## Global **Kube-Vip** items + +- **Improved metrics** - At this time the scaffolding for monitoring exists; however, this needs to be extended drastically to provide greater observability into what is happening within **kube-vip**. +- **Windows support** - The Go SDK originally didn't support low-level sockets for ARP; this should be revisited.
+- **Additional BGP features** : + - Communities + - BFD diff --git a/cmd/kube-vip-config.go b/cmd/kube-vip-config.go deleted file mode 100644 index d8a9f0ce..00000000 --- a/cmd/kube-vip-config.go +++ /dev/null @@ -1,146 +0,0 @@ -package cmd - -import ( - "fmt" - - "github.com/ghodss/yaml" - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - appv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// [sample configuration] - flags -var cliConfig kubevip.Config -var cliConfigLB kubevip.LoadBalancer -var cliLocalPeer string -var cliRemotePeers, cliBackends []string - -func init() { - kubeVipSampleConfig.Flags().StringVar(&cliConfig.Interface, "interface", "eth0", "Name of the interface to bind to") - kubeVipSampleConfig.Flags().StringVar(&cliConfig.VIP, "vip", "192.168.0.1", "The Virtual IP address") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.SingleNode, "singleNode", false, "Start this instance as a single node") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader") - kubeVipSampleConfig.Flags().BoolVar(&cliConfig.GratuitousARP, "arp", true, "Use ARP broadcasts to improve VIP re-allocations") - kubeVipSampleConfig.Flags().StringVar(&cliLocalPeer, "localPeer", "server1:192.168.0.1:10000", "Settings for this peer, format: id:address:port") - kubeVipSampleConfig.Flags().StringSliceVar(&cliRemotePeers, "remotePeers", []string{"server2:192.168.0.2:10000", "server3:192.168.0.3:10000"}, "Comma seperated remotePeers, format: id:address:port") - // Load Balancer flags - kubeVipSampleConfig.Flags().BoolVar(&cliConfigLB.BindToVip, "lbBindToVip", false, "Bind example load balancer to VIP") - kubeVipSampleConfig.Flags().StringVar(&cliConfigLB.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)") - kubeVipSampleConfig.Flags().StringVar(&cliConfigLB.Name, "lbName", "Example Load Balancer", "The name of a load balancer instance") - kubeVipSampleConfig.Flags().IntVar(&cliConfigLB.Port, "lbPort", 8080, "Port that load balancer will expose on") - kubeVipSampleConfig.Flags().StringSliceVar(&cliBackends, "lbBackends", []string{"192.168.0.1:8080", "192.168.0.2:8080"}, "Comma seperated backends, format: address:port") -} - -var kubeVipSampleConfig = &cobra.Command{ - Use: "config", - Short: "Generate a Sample configuration", - Run: func(cmd *cobra.Command, args []string) { - - // // Parse localPeer - // p, err := kubevip.ParsePeerConfig(cliLocalPeer) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfig.LocalPeer = *p - - // // Parse remotePeers - // //Iterate backends - // for i := range cliRemotePeers { - // p, err := kubevip.ParsePeerConfig(cliRemotePeers[i]) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfig.RemotePeers = append(cliConfig.RemotePeers, *p) - // } - - // //Iterate backends - // for i := range cliBackends { - // b, err := kubevip.ParseBackendConfig(cliBackends[i]) - // if err != nil { - // cmd.Help() - // log.Fatalln(err) - // } - // cliConfigLB.Backends = append(cliConfigLB.Backends, *b) - // } - - // Add the basic Load-Balancer to the configuration - cliConfig.LoadBalancers = append(cliConfig.LoadBalancers, cliConfigLB) - - err := cliConfig.ParseFlags(cliLocalPeer, cliRemotePeers, cliBackends) - if err != nil { - cmd.Help() - log.Fatalln(err) - } - - err = kubevip.ParseEnvironment(&cliConfig) - if err != nil { - cmd.Help() - log.Fatalln(err) - } - - 
cliConfig.PrintConfig() - }, -} - -var kubeVipSampleManifest = &cobra.Command{ - Use: "manifest", - Short: "Generate a Sample kubernetes manifest", - Run: func(cmd *cobra.Command, args []string) { - // Generate the sample manifest specification - p := &appv1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "kube-vip", - Namespace: "kube-system", - }, - Spec: appv1.PodSpec{ - Containers: []appv1.Container{ - { - Name: "kube-vip", - Image: fmt.Sprintf("docker.io/plndr/kube-vip:%s", Release.Version), - SecurityContext: &appv1.SecurityContext{ - Capabilities: &appv1.Capabilities{ - Add: []appv1.Capability{ - "NET_ADMIN", - "SYS_TIME", - }, - }, - }, - Args: []string{ - "start", - "-c", - "/etc/kube-vip/config.yaml", - }, - VolumeMounts: []appv1.VolumeMount{ - { - Name: "config", - MountPath: "/etc/kube-vip/", - }, - }, - }, - }, - Volumes: []appv1.Volume{ - { - Name: "config", - VolumeSource: appv1.VolumeSource{ - HostPath: &appv1.HostPathVolumeSource{ - Path: "/etc/kube-vip/", - }, - }, - }, - }, - HostNetwork: true, - }, - } - - b, _ := yaml.Marshal(p) - fmt.Printf(string(b)) - }, -} diff --git a/cmd/kube-vip-kubeadm.go b/cmd/kube-vip-kubeadm.go index 485c3f9b..9db4b5aa 100644 --- a/cmd/kube-vip-kubeadm.go +++ b/cmd/kube-vip-kubeadm.go @@ -1,18 +1,12 @@ package cmd import ( - "context" "fmt" - "net" "os" - "github.com/plunder-app/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/kubevip" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" ) // kubeadm adds two subcommands for managing a vip during a kubeadm init/join @@ -29,7 +23,7 @@ var kubeKubeadm = &cobra.Command{ Use: "kubeadm", Short: "Kubeadm functions", Run: func(cmd *cobra.Command, args []string) { - cmd.Help() + _ = cmd.Help() // TODO - A load of text detailing what's actually happening }, } @@ -43,20 +37,24 @@ var kubeKubeadmInit = &cobra.Command{ log.SetLevel(log.Level(logLevel)) initConfig.LoadBalancers = append(initConfig.LoadBalancers, initLoadBalancer) // TODO - A load of text detailing what's actually happening - kubevip.ParseEnvironment(&initConfig) + err := kubevip.ParseEnvironment(&initConfig) + if err != nil { + log.Fatalf("Error parsing environment from config: %v", err) + } + // TODO - check for certain things VIP/interfaces if initConfig.Interface == "" { - cmd.Help() + _ = cmd.Help() log.Fatalln("No interface is specified for kube-vip to bind to") } if initConfig.VIP == "" && initConfig.Address == "" { - cmd.Help() + _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } - cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version) - fmt.Println(cfg) + cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version, inCluster) + fmt.Println(cfg) // output manifest to stdout }, } @@ -69,15 +67,19 @@ var kubeKubeadmJoin = &cobra.Command{ initConfig.LoadBalancers = append(initConfig.LoadBalancers, initLoadBalancer) // TODO - A load of text detailing what's actually happening - kubevip.ParseEnvironment(&initConfig) + err := kubevip.ParseEnvironment(&initConfig) + if err != nil { + log.Fatalf("Error parsing environment from config: %v", err) + } + // TODO - check for certain things VIP/interfaces if initConfig.Interface == "" { - cmd.Help() + _ = cmd.Help() log.Fatalln("No interface is specified for kube-vip to bind to") } if 
initConfig.VIP == "" && initConfig.Address == "" { - cmd.Help() + _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } @@ -85,77 +87,7 @@ var kubeKubeadmJoin = &cobra.Command{ log.Fatalf("Unable to find file [%s]", kubeConfigPath) } - // We will use kubeconfig in order to find all the master nodes - // use the current context in kubeconfig - config, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) - if err != nil { - log.Fatal(err.Error()) - } - - // create the clientset - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - log.Fatal(err.Error()) - } - - opts := metav1.ListOptions{} - opts.LabelSelector = "node-role.kubernetes.io/master" - nodes, err := clientset.CoreV1().Nodes().List(context.TODO(), opts) - - // Iterate over all nodes that are masters and find the details to build a peer list - for x := range nodes.Items { - // Get hostname and address - var nodeAddress, nodeHostname string - for y := range nodes.Items[x].Status.Addresses { - switch nodes.Items[x].Status.Addresses[y].Type { - case corev1.NodeHostName: - nodeHostname = nodes.Items[x].Status.Addresses[y].Address - case corev1.NodeInternalIP: - nodeAddress = nodes.Items[x].Status.Addresses[y].Address - } - } - - newPeer, err := kubevip.ParsePeerConfig(fmt.Sprintf("%s:%s:%d", nodeHostname, nodeAddress, 10000)) - if err != nil { - panic(err.Error()) - } - initConfig.RemotePeers = append(initConfig.RemotePeers, *newPeer) - - } - // Generate manifest and print - cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version) - fmt.Println(cfg) + cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version, inCluster) + fmt.Println(cfg) // output manifest to stdout }, } - -func autoGenLocalPeer() (*kubevip.RaftPeer, error) { - // hostname // address // defaultport - h, err := os.Hostname() - if err != nil { - return nil, err - } - - var a string - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil, err - } - for _, address := range addrs { - // check the address type and if it is not a loopback the display it - if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { - if ipnet.IP.To4() != nil { - a = ipnet.IP.String() - break - } - } - } - if a == "" { - return nil, fmt.Errorf("Unable to find local address") - } - return &kubevip.RaftPeer{ - ID: h, - Address: a, - Port: 10000, - }, nil - -} diff --git a/cmd/kube-vip-manifests.go b/cmd/kube-vip-manifests.go index 77666e79..089000c8 100644 --- a/cmd/kube-vip-manifests.go +++ b/cmd/kube-vip-manifests.go @@ -3,9 +3,10 @@ package cmd import ( "fmt" - "github.com/plunder-app/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/kubevip" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "gopkg.in/yaml.v2" ) // manifests will eventually deprecate the kubeadm set of subcommands @@ -13,17 +14,23 @@ import ( // - Pod spec manifest, mainly used for a static pod (kubeadm) // - Daemonset manifest, mainly used to run kube-vip as a deamonset within Kubernetes (k3s/rke) +// var inCluster bool +var taint bool + func init() { + kubeManifest.PersistentFlags().BoolVar(&inCluster, "inCluster", false, "Use the incluster token to authenticate to Kubernetes") + kubeManifestDaemon.PersistentFlags().BoolVar(&taint, "taint", false, "Taint the manifest for only running on control planes") kubeManifest.AddCommand(kubeManifestPod) kubeManifest.AddCommand(kubeManifestDaemon) + kubeManifest.AddCommand(kubeManifestRbac) } var kubeManifest = &cobra.Command{ Use: "manifest", Short: 
"Manifest functions", Run: func(cmd *cobra.Command, args []string) { - cmd.Help() + _ = cmd.Help() // TODO - A load of text detailing what's actually happening }, } @@ -36,20 +43,18 @@ var kubeManifestPod = &cobra.Command{ log.SetLevel(log.Level(logLevel)) initConfig.LoadBalancers = append(initConfig.LoadBalancers, initLoadBalancer) // TODO - A load of text detailing what's actually happening - kubevip.ParseEnvironment(&initConfig) - // TODO - check for certain things VIP/interfaces - if initConfig.Interface == "" { - cmd.Help() - log.Fatalln("No interface is specified for kube-vip to bind to") + if err := kubevip.ParseEnvironment(&initConfig); err != nil { + log.Fatalf("Error parsing environment from config: %v", err) } - if initConfig.VIP == "" { - cmd.Help() + // The control plane has a requirement for a VIP being specified + if initConfig.EnableControlPlane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { + _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } - cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version) - fmt.Println(cfg) + cfg := kubevip.GeneratePodManifestFromConfig(&initConfig, Release.Version, inCluster) + fmt.Println(cfg) // output manifest to stdout }, } @@ -61,19 +66,42 @@ var kubeManifestDaemon = &cobra.Command{ log.SetLevel(log.Level(logLevel)) initConfig.LoadBalancers = append(initConfig.LoadBalancers, initLoadBalancer) // TODO - A load of text detailing what's actually happening - kubevip.ParseEnvironment(&initConfig) - // TODO - check for certain things VIP/interfaces - if initConfig.Interface == "" { - cmd.Help() - log.Fatalln("No interface is specified for kube-vip to bind to") + if err := kubevip.ParseEnvironment(&initConfig); err != nil { + log.Fatalf("error parsing environment config: %v", err) } - if initConfig.VIP == "" { - cmd.Help() + // TODO - check for certain things VIP/interfaces + + // The control plane has a requirement for a VIP being specified + if initConfig.EnableControlPlane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { + _ = cmd.Help() log.Fatalln("No address is specified for kube-vip to expose services on") } - cfg := kubevip.GenerateDeamonsetManifestFromConfig(&initConfig, Release.Version) - fmt.Println(cfg) + cfg := kubevip.GenerateDaemonsetManifestFromConfig(&initConfig, Release.Version, inCluster, taint) + fmt.Println(cfg) // output manifest to stdout + }, +} + +var kubeManifestRbac = &cobra.Command{ + Use: "rbac", + Short: "Generate an RBAC Manifest", + Run: func(cmd *cobra.Command, args []string) { + // Set the logging level for all subsequent functions + log.SetLevel(log.Level(logLevel)) + initConfig.LoadBalancers = append(initConfig.LoadBalancers, initLoadBalancer) + // TODO - A load of text detailing what's actually happening + if err := kubevip.ParseEnvironment(&initConfig); err != nil { + log.Fatalf("Error parsing environment from config: %v", err) + } + + // The control plane has a requirement for a VIP being specified + if initConfig.EnableControlPlane && (initConfig.VIP == "" && initConfig.Address == "" && !initConfig.DDNS) { + _ = cmd.Help() + log.Fatalln("No address is specified for kube-vip to expose services on") + } + cfg := kubevip.GenerateSA() + b, _ := yaml.Marshal(cfg) + fmt.Println(string(b)) // output manifest to stdout }, } diff --git a/cmd/kube-vip-start.go b/cmd/kube-vip-start.go index 41e5d0a8..51f34e7f 100644 --- a/cmd/kube-vip-start.go +++ b/cmd/kube-vip-start.go @@ -1,13 +1,9 @@ package cmd 
import ( - "context" - "os" - "os/signal" - - "github.com/plunder-app/kube-vip/pkg/cluster" - "github.com/plunder-app/kube-vip/pkg/kubevip" - "github.com/plunder-app/kube-vip/pkg/vip" + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/kubevip" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" ) @@ -16,7 +12,6 @@ import ( var startConfig kubevip.Config var startConfigLB kubevip.LoadBalancer var startLocalPeer, startKubeConfigPath string -var startRemotePeers, startBackends []string var inCluster bool func init() { @@ -28,24 +23,27 @@ func init() { kubeVipStart.Flags().StringVar(&startConfig.Interface, "interface", "eth0", "Name of the interface to bind to") kubeVipStart.Flags().StringVar(&startConfig.VIP, "vip", "192.168.0.1", "The Virtual IP address") kubeVipStart.Flags().StringVar(&startConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP") + kubeVipStart.Flags().IntVar(&startConfig.Port, "port", 6443, "listen port for the VIP") + kubeVipStart.Flags().BoolVar(&startConfig.DDNS, "ddns", false, "use Dynamic DNS + DHCP to allocate VIP for address") kubeVipStart.Flags().BoolVar(&startConfig.SingleNode, "singleNode", false, "Start this instance as a single node") kubeVipStart.Flags().BoolVar(&startConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader") - kubeVipStart.Flags().BoolVar(&startConfig.GratuitousARP, "arp", false, "Use ARP broadcasts to improve VIP re-allocations") + kubeVipStart.Flags().BoolVar(&startConfig.EnableARP, "arp", false, "Use ARP broadcasts to improve VIP re-allocations") kubeVipStart.Flags().StringVar(&startLocalPeer, "localPeer", "server1:192.168.0.1:10000", "Settings for this peer, format: id:address:port") - kubeVipStart.Flags().StringSliceVar(&startRemotePeers, "remotePeers", []string{"server2:192.168.0.2:10000", "server3:192.168.0.3:10000"}, "Comma seperated remotePeers, format: id:address:port") + // Load Balancer flags kubeVipStart.Flags().BoolVar(&startConfigLB.BindToVip, "lbBindToVip", false, "Bind example load balancer to VIP") kubeVipStart.Flags().StringVar(&startConfigLB.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)") kubeVipStart.Flags().StringVar(&startConfigLB.Name, "lbName", "Example Load Balancer", "The name of a load balancer instance") kubeVipStart.Flags().IntVar(&startConfigLB.Port, "lbPort", 8080, "Port that load balancer will expose on") - kubeVipStart.Flags().IntVar(&startConfigLB.BackendPort, "lbBackEndPort", 6443, "A port that all backends may be using (optional)") - kubeVipStart.Flags().StringSliceVar(&startBackends, "lbBackends", []string{"192.168.0.1:8080", "192.168.0.2:8080"}, "Comma seperated backends, format: address:port") + kubeVipStart.Flags().StringVar(&startConfigLB.ForwardingMethod, "lbForwardingMethod", "local", "The forwarding method of a load balancer instance") // Cluster configuration kubeVipStart.Flags().StringVar(&startKubeConfigPath, "kubeConfig", "/etc/kubernetes/admin.conf", "The path of a kubernetes configuration file") kubeVipStart.Flags().BoolVar(&inCluster, "inCluster", false, "Use the incluster token to authenticate to Kubernetes") kubeVipStart.Flags().BoolVar(&startConfig.EnableLeaderElection, "leaderElection", false, "Use the Kubernetes leader election mechanism for clustering") + // This sets the namespace that the lock should exist in + kubeVipStart.Flags().StringVarP(&startConfig.Namespace, "namespace", "n", "kube-system", "The configuration map defined within 
the cluster") } var kubeVipStart = &cobra.Command{ @@ -58,70 +56,59 @@ var kubeVipStart = &cobra.Command{ // If a configuration file is loaded, then it will overwrite flags - if configPath != "" { - c, err := kubevip.OpenConfig(configPath) - if err != nil { - log.Fatalf("%v", err) - } - startConfig = *c - } - // parse environment variables, these will overwrite anything loaded or flags err = kubevip.ParseEnvironment(&startConfig) if err != nil { log.Fatalln(err) } + if startConfig.LeaderElectionType == "etcd" { + log.Fatalln("Leader election with etcd not supported in start command, use manager") + } + newCluster, err := cluster.InitCluster(&startConfig, disableVIP) if err != nil { log.Fatalf("%v", err) } - - // start the dns updater if the address flag is used and the address isn't an IP - if startConfig.Address != "" && !vip.IsIP(startConfig.Address) { - log.Infof("starting the DNS updater for the address %s", startConfig.Address) - - ipUpdater := vip.NewIPUpdater(startConfig.Address, newCluster.Network) - - ipUpdater.Run(context.Background()) - } - + var bgpServer *bgp.Server if startConfig.SingleNode { // If the Virtual IP isn't disabled then create the netlink configuration // Start a single node cluster - newCluster.StartSingleNode(&startConfig, disableVIP) + if err := newCluster.StartSingleNode(&startConfig, disableVIP); err != nil { + log.Errorf("error starting single node: %v", err) + } } else { if disableVIP { log.Fatalln("Cluster mode requires the Virtual IP to be enabled, use single node with no VIP") } if startConfig.EnableLeaderElection { - cm, err := cluster.NewManager(startKubeConfigPath, inCluster) + cm, err := cluster.NewManager(startKubeConfigPath, inCluster, startConfig.Port) if err != nil { log.Fatalf("%v", err) } - // Leader Cluster will block - err = newCluster.StartLeaderCluster(&startConfig, cm) - if err != nil { - log.Fatalf("%v", err) + if startConfig.EnableBGP { + log.Info("Starting the BGP server to advertise VIP routes to VGP peers") + bgpServer, err = bgp.NewBGPServer(&startConfig.BGPConfig, nil) + if err != nil { + log.Fatalf("%v", err) + } + + // Defer a function to check if the bgpServer has been created and if so attempt to close it + defer func() { + if bgpServer != nil { + bgpServer.Close() + } + }() } - } else { - // // Start a multi-node (raft) cluster, this doesn't block so will wait on signal - err = newCluster.StartRaftCluster(&startConfig) + // Leader Cluster will block + err = newCluster.StartCluster(&startConfig, cm, bgpServer) if err != nil { log.Fatalf("%v", err) } - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt) - - <-signalChan - - newCluster.Stop() } - } - }, } diff --git a/cmd/kube-vip.go b/cmd/kube-vip.go index 02f46644..b37c51f3 100644 --- a/cmd/kube-vip.go +++ b/cmd/kube-vip.go @@ -1,31 +1,49 @@ package cmd import ( + "context" "fmt" + "net/http" "os" - "strconv" + "strings" + "time" - "github.com/plunder-app/kube-vip/pkg/kubevip" - "github.com/plunder-app/kube-vip/pkg/service" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" + "github.com/vishvananda/netlink" + + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/manager" + "github.com/kube-vip/kube-vip/pkg/vip" ) // Path to the configuration file var configPath string +// Path to the configuration file +// var namespace string + // Disable the Virtual IP 
(bind to the existing network stack) var disableVIP bool +// Disable the Virtual IP (bind to the existing network stack) +// var controlPlane bool + // Run as a load balancer service (within a pod / kubernetes) -var serviceArp bool +// var serviceArp bool // ConfigMap name within a Kubernetes cluster var configMap string -// Configure the level of loggin +// Configure the level of logging var logLevel uint32 +// Provider Config +var providerConfig string + // Release - this struct contains the release information populated when building kube-vip var Release struct { Version string @@ -33,8 +51,10 @@ var Release struct { } // Structs used via the various subcommands -var initConfig kubevip.Config -var initLoadBalancer kubevip.LoadBalancer +var ( + initConfig kubevip.Config + initLoadBalancer kubevip.LoadBalancer +) // Points to a kubernetes configuration file var kubeConfigPath string @@ -45,69 +65,105 @@ var kubeVipCmd = &cobra.Command{ } func init() { - - localpeer, err := autoGenLocalPeer() - if err != nil { - log.Fatalln(err) - } - initConfig.LocalPeer = *localpeer - //initConfig.Peers = append(initConfig.Peers, *localpeer) + // Basic flags kubeVipCmd.PersistentFlags().StringVar(&initConfig.Interface, "interface", "", "Name of the interface to bind to") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.ServicesInterface, "serviceInterface", "", "Name of the interface to bind to (for services)") kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIP, "vip", "", "The Virtual IP address") - kubeVipCmd.PersistentFlags().StringVar(&startConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP") - kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "", "The CIDR range for the virtual IP address") - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.GratuitousARP, "arp", true, "Enable Arp for Vip changes") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPSubnet, "vipSubnet", "", "The Virtual IP address subnet e.g. 
/32 /24 /8 etc..") + + kubeVipCmd.PersistentFlags().StringVar(&initConfig.VIPCIDR, "cidr", "32", "The CIDR range for the virtual IP address") // todo: deprecate + + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Address, "address", "", "an address (IP or DNS name) to use as a VIP") + kubeVipCmd.PersistentFlags().IntVar(&initConfig.Port, "port", 6443, "Port for the VIP") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableARP, "arp", false, "Enable Arp for VIP changes") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableWireguard, "wireguard", false, "Enable Wireguard for services VIPs") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableRoutingTable, "table", false, "Enable Routing Table for services VIPs") + + // LoadBalancer flags + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLoadBalancer, "enableLoadBalancer", false, "enable loadbalancing on the VIP with IPVS") + kubeVipCmd.PersistentFlags().IntVar(&initConfig.LoadBalancerPort, "lbPort", 6443, "loadbalancer port for the VIP") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LoadBalancerForwardingMethod, "lbForwardingMethod", "local", "loadbalancer forwarding method") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DDNS, "ddns", false, "use Dynamic DNS + DHCP to allocate VIP for address") // Clustering type (leaderElection) kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLeaderElection, "leaderElection", false, "Use the Kubernetes leader election mechanism for clustering") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LeaderElectionType, "leaderElectionType", "kubernetes", "Defines the backend to run the leader election: kubernetes or etcd. Defaults to kubernetes.") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LeaseName, "leaseName", "plndr-cp-lock", "Name of the lease that is used for leader election") kubeVipCmd.PersistentFlags().IntVar(&initConfig.LeaseDuration, "leaseDuration", 5, "Length of time a Kubernetes leader lease can be held for") kubeVipCmd.PersistentFlags().IntVar(&initConfig.RenewDeadline, "leaseRenewDuration", 3, "Length of time a Kubernetes leader can attempt to renew its lease") kubeVipCmd.PersistentFlags().IntVar(&initConfig.RetryPeriod, "leaseRetry", 1, "Number of times the host will retry to hold a lease") - // Clustering type (raft) - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.StartAsLeader, "startAsLeader", false, "Start this instance as the cluster leader") - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.AddPeersAsBackends, "addPeersToLB", true, "Add raft peers to the load-balancer") - - // Packet flags - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnablePacket, "packet", false, "This will use the Packet API (requires the token ENV) to update the EIP <-> VIP") - kubeVipCmd.PersistentFlags().StringVar(&initConfig.PacketAPIKey, "packetKey", "", "The API token for authenticating with the Packet API") - kubeVipCmd.PersistentFlags().StringVar(&initConfig.PacketProject, "packetProject", "", "The name of project already created within Packet") - - // Load Balancer flags - kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableLoadBalancer, "lbEnable", false, "Enable a load-balancer on the VIP") - kubeVipCmd.PersistentFlags().BoolVar(&initLoadBalancer.BindToVip, "lbBindToVip", true, "Bind example load balancer to VIP") - kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Type, "lbType", "tcp", "Type of load balancer instance (TCP/HTTP)") - kubeVipCmd.PersistentFlags().StringVar(&initLoadBalancer.Name, "lbName", "Kubeadm Load Balancer", "The 
name of a load balancer instance") - kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.Port, "lbPort", 6443, "Port that load balancer will expose on") - kubeVipCmd.PersistentFlags().IntVar(&initLoadBalancer.BackendPort, "lbBackEndPort", 6444, "A port that all backends may be using (optional)") + // Equinix Metal flags + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableMetal, "metal", false, "This will use the Equinix Metal API (requires the token ENV) to update the EIP <-> VIP") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalAPIKey, "metalKey", "", "The API token for authenticating with the Equinix Metal API") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProject, "metalProject", "", "The name of project already created within Equinix Metal") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.MetalProjectID, "metalProjectID", "", "The ID of project already created within Equinix Metal") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.ProviderConfig, "provider-config", "", "The path to a provider configuration") // BGP flags kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableBGP, "bgp", false, "This will enable BGP support within kube-vip") kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.RouterID, "bgpRouterID", "", "The routerID for the bgp server") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIF, "sourceIF", "", "The source interface for bgp peering (not to be used with sourceIP)") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPConfig.SourceIP, "sourceIP", "", "The source address for bgp peering (not to be used with sourceIF)") kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPConfig.AS, "localAS", 65000, "The local AS number for the bgp server") + kubeVipCmd.PersistentFlags().Uint64Var(&initConfig.BGPConfig.HoldTime, "bgpHoldTimer", 30, "The hold timer for all bgp peers (it defines the time a session is held)") + kubeVipCmd.PersistentFlags().Uint64Var(&initConfig.BGPConfig.KeepaliveInterval, "bgpKeepAliveInterval", 10, "The keepalive interval for all bgp peers (it defines the heartbeat of keepalive messages)") kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Address, "peerAddress", "", "The address of a BGP peer") kubeVipCmd.PersistentFlags().Uint32Var(&initConfig.BGPPeerConfig.AS, "peerAS", 65000, "The AS number for a BGP peer") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.BGPPeerConfig.Password, "peerPass", "", "The md5 password for a BGP peer") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.BGPPeerConfig.MultiHop, "multihop", false, "This will enable BGP multihop support") + kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.BGPPeers, "bgppeers", []string{}, "Comma separated BGP Peer, format: address:as:password:multihop") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Annotations, "annotations", "", "Set Node annotations prefix for parsing") + + // Namespace for kube-vip + kubeVipCmd.PersistentFlags().StringVarP(&initConfig.Namespace, "namespace", "n", "kube-system", "The namespace for the configmap defined within the cluster") // Manage logging kubeVipCmd.PersistentFlags().Uint32Var(&logLevel, "log", 4, "Set the level of logging") // Service flags - kubeVipService.Flags().StringVarP(&configMap, "configMap", "c", "kube-vip", "The configuration map defined within the cluster") - kubeVipService.Flags().StringVarP(&service.Interface, "interface", "i", "eth0", "Name of the interface to bind to") - kubeVipService.Flags().BoolVar(&service.OutSideCluster, 
"OutSideCluster", false, "Start Controller outside of cluster") - kubeVipService.Flags().BoolVar(&service.EnableArp, "arp", false, "Use ARP broadcasts to improve VIP re-allocations") + kubeVipService.Flags().StringVarP(&configMap, "configMap", "c", "plndr", "The configuration map defined within the cluster") + + // Routing Table flags + kubeVipCmd.PersistentFlags().IntVar(&initConfig.RoutingTableID, "tableID", 198, "The routing table used for all table entries") + kubeVipCmd.PersistentFlags().IntVar(&initConfig.RoutingTableType, "tableType", 0, "The type of route that will be added to the routing table") + + // Behaviour flags + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableControlPlane, "controlplane", false, "Enable HA for control plane") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DetectControlPlane, "autodetectcp", false, "Determine working address for control plane (from loopback)") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServices, "services", false, "Enable Kubernetes services") + + // Extended behaviour flags + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServicesElection, "servicesElection", false, "Enable leader election per kubernetes service") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.LoadBalancerClassOnly, "lbClassOnly", false, "Enable load balancing only for services with LoadBalancerClass \"kube-vip.io/kube-vip-class\"") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.LoadBalancerClassName, "lbClassName", "kube-vip.io/kube-vip-class", "Name of load balancer class for kube-VIP, defaults to \"kube-vip.io/kube-vip-class\"") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableServiceSecurity, "onlyAllowTrafficServicePorts", false, "Only allow traffic to service ports, others will be dropped, defaults to false") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableNodeLabeling, "enableNodeLabeling", false, "Enable leader node labeling with \"kube-vip.io/has-ip=\", defaults to false") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.ServicesLeaseName, "servicesLeaseName", "plndr-svcs-lock", "Name of the lease that is used for leader election for services (in arp mode)") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.DNSMode, "dnsMode", "first", "Name of the mode that DNS lookup will be performed (first, ipv4, ipv6, dual)") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.DisableServiceUpdates, "disableServiceUpdates", false, "If true, kube-vip will process services as usual, but will not update service's Status.LoadBalancer.Ingress slice") + kubeVipCmd.PersistentFlags().BoolVar(&initConfig.EnableEndpointSlices, "enableEndpointSlices", false, "If enabled, kube-vip will only advertise services, but will use EndpointSlices instead of endpoints to get IPs of Pods") + + // Prometheus HTTP Server + kubeVipCmd.PersistentFlags().StringVar(&initConfig.PrometheusHTTPServer, "prometheusHTTPServer", ":2112", "Host and port used to expose Prometheus metrics via an HTTP server") + + // Etcd + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.CAFile, "etcdCACert", "", "Verify certificates of TLS-enabled secure servers using this CA bundle file") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.ClientCertFile, "etcdCert", "", "Identify secure client using this TLS certificate file") + kubeVipCmd.PersistentFlags().StringVar(&initConfig.Etcd.ClientKeyFile, "etcdKey", "", "Identify secure client using this TLS key file") + kubeVipCmd.PersistentFlags().StringSliceVar(&initConfig.Etcd.Endpoints, 
"etcdEndpoints", nil, "Etcd member endpoints") + + // Kubernetes client specific flags + + kubeVipCmd.PersistentFlags().StringVar(&initConfig.K8sConfigFile, "k8sConfigPath", "/etc/kubernetes/admin.conf", "Path to the configuration file used with the Kubernetes client") kubeVipCmd.AddCommand(kubeKubeadm) kubeVipCmd.AddCommand(kubeManifest) + kubeVipCmd.AddCommand(kubeVipManager) kubeVipCmd.AddCommand(kubeVipSample) kubeVipCmd.AddCommand(kubeVipService) kubeVipCmd.AddCommand(kubeVipStart) kubeVipCmd.AddCommand(kubeVipVersion) - - // Sample commands - kubeVipSample.AddCommand(kubeVipSampleConfig) - kubeVipSample.AddCommand(kubeVipSampleManifest) - } // Execute - starts the command parsing process @@ -132,7 +188,7 @@ var kubeVipSample = &cobra.Command{ Use: "sample", Short: "Generate a Sample configuration", Run: func(cmd *cobra.Command, args []string) { - cmd.Help() + _ = cmd.Help() }, } @@ -143,44 +199,219 @@ var kubeVipService = &cobra.Command{ // Set the logging level for all subsequent functions log.SetLevel(log.Level(logLevel)) - // User Environment variables as an option to make manifest clearer - envInterface := os.Getenv("vip_interface") - if envInterface != "" { - service.Interface = envInterface + // parse environment variables, these will overwrite anything loaded or flags + err := kubevip.ParseEnvironment(&initConfig) + if err != nil { + log.Fatalln(err) + } + + if err := initConfig.CheckInterface(); err != nil { + log.Fatalln(err) } + // User Environment variables as an option to make manifest clearer envConfigMap := os.Getenv("vip_configmap") - if envInterface != "" { + if envConfigMap != "" { configMap = envConfigMap } - envLog := os.Getenv("vip_loglevel") - if envLog != "" { - logLevel, err := strconv.Atoi(envLog) + // Define the new service manager + mgr, err := manager.New(configMap, &initConfig) + if err != nil { + log.Fatalf("%v", err) + } + + // Start the service manager, this will watch the config Map and construct kube-vip services for it + err = mgr.Start() + if err != nil { + log.Fatalf("%v", err) + } + }, +} + +var kubeVipManager = &cobra.Command{ + Use: "manager", + Short: "Start the kube-vip manager", + Run: func(cmd *cobra.Command, args []string) { + // parse environment variables, these will overwrite anything loaded or flags + err := kubevip.ParseEnvironment(&initConfig) + if err != nil { + log.Fatalln(err) + } + + // Set the logging level for all subsequent functions + log.SetLevel(log.Level(initConfig.Logging)) + + // Welome messages + log.Infof("Starting kube-vip.io [%s]", Release.Version) + log.Debugf("Build kube-vip.io [%s]", Release.Build) + + // start prometheus server + if initConfig.PrometheusHTTPServer != "" { + go servePrometheusHTTPServer(cmd.Context(), PrometheusHTTPServerConfig{ + Addr: initConfig.PrometheusHTTPServer, + }) + } + + // Determine the kube-vip mode + var mode string + if initConfig.EnableARP { + mode = "ARP" + } + + if initConfig.EnableBGP { + mode = "BGP" + } + + if initConfig.EnableWireguard { + mode = "Wireguard" + } + + if initConfig.EnableRoutingTable { + mode = "Routing Table" + } + + // Provide configuration to output/logging + log.Infof("namespace [%s], Mode: [%s], Features(s): Control Plane:[%t], Services:[%t]", initConfig.Namespace, mode, initConfig.EnableControlPlane, initConfig.EnableServices) + + // End if nothing is enabled + if !initConfig.EnableServices && !initConfig.EnableControlPlane { + log.Fatalln("no features are enabled") + } + + // If we're using wireguard then all traffic goes through the wg0 interface + if 
initConfig.EnableWireguard { + if initConfig.Interface == "" { + // Set the vip interface to the wireguard interface + initConfig.Interface = "wg0" + } + + log.Infof("configuring Wireguard networking") + l, err := netlink.LinkByName(initConfig.Interface) + if err != nil { + if strings.Contains(err.Error(), "Link not found") { + log.Warnf("interface \"%s\" doesn't exist, attempting to create wireguard interface", initConfig.Interface) + err = netlink.LinkAdd(&netlink.Wireguard{LinkAttrs: netlink.LinkAttrs{Name: initConfig.Interface}}) + if err != nil { + log.Fatalln(err) + } + l, err = netlink.LinkByName(initConfig.Interface) + if err != nil { + log.Fatalln(err) + } + } + } + err = netlink.LinkSetUp(l) if err != nil { - panic(fmt.Sprintf("Unable to parse environment variable [vip_loglevel], should be int")) + log.Fatalln(err) + } + + } else { // if we're not using Wireguard then we'll need to use an actual interface + // Check if the interface needs auto-detecting + if initConfig.Interface == "" { + log.Infof("No interface is specified for VIP in config, auto-detecting default Interface") + defaultIF, err := vip.GetDefaultGatewayInterface() + if err != nil { + _ = cmd.Help() + log.Fatalf("unable to detect default interface -> [%v]", err) + } + initConfig.Interface = defaultIF.Name + log.Infof("kube-vip will bind to interface [%s]", initConfig.Interface) + + go func() { + if err := vip.MonitorDefaultInterface(context.TODO(), defaultIF); err != nil { + log.Fatalf("crash: %s", err.Error()) + } + }() } - log.SetLevel(log.Level(logLevel)) + } + // Perform a check on th state of the interface + if err := initConfig.CheckInterface(); err != nil { + log.Fatalln(err) } - envArp := os.Getenv("vip_arp") - if envArp != "" { - arpBool, err := strconv.ParseBool(envArp) - if err != nil { - panic(fmt.Sprintf("Unable to parse environment variable [arp], should be bool (true/false)")) + // User Environment variables as an option to make manifest clearer + envConfigMap := os.Getenv("vip_configmap") + if envConfigMap != "" { + configMap = envConfigMap + } + + // If Equinix Metal is enabled and there is a provider configuration passed + if initConfig.EnableMetal { + if providerConfig != "" { + providerAPI, providerProject, err := equinixmetal.GetPacketConfig(providerConfig) + if err != nil { + log.Fatalf("%v", err) + } + initConfig.MetalAPIKey = providerAPI + initConfig.MetalProject = providerProject } - service.EnableArp = arpBool } // Define the new service manager - mgr, err := service.NewManager(configMap) + mgr, err := manager.New(configMap, &initConfig) if err != nil { - log.Fatalf("%v", err) + log.Fatalf("configuring new Manager error -> %v", err) } + + prometheus.MustRegister(mgr.PrometheusCollector()...) + // Start the service manager, this will watch the config Map and construct kube-vip services for it err = mgr.Start() if err != nil { - log.Fatalf("%v", err) + log.Fatalf("starting new Manager error -> %v", err) } }, } + +// PrometheusHTTPServerConfig defines the Prometheus server configuration. +type PrometheusHTTPServerConfig struct { + // Addr sets the http server address used to expose the metric endpoint + Addr string +} + +func servePrometheusHTTPServer(ctx context.Context, config PrometheusHTTPServerConfig) { + var err error + mux := http.NewServeMux() + mux.Handle("/metrics", promhttp.Handler()) + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + _, _ = w.Write([]byte(` + kube-vip + +

<html><head><title>kube-vip</title></head>
+ <body>
+ <h1>kube-vip Metrics</h1>
+ <p><a href="/metrics">Metrics</a></p>
+ </body>
+ </html>
+ + `)) + }) + + srv := &http.Server{ + Addr: config.Addr, + Handler: mux, + ReadHeaderTimeout: 2 * time.Second, + } + + go func() { + if err = srv.ListenAndServe(); err != nil && err != http.ErrServerClosed { + log.Fatalf("listen:%+s\n", err) + } + }() + + log.Printf("prometheus HTTP server started") + + <-ctx.Done() + + log.Printf("prometheus HTTP server stopped") + + ctxShutDown, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer func() { + cancel() + }() + + if err = srv.Shutdown(ctxShutDown); err != nil { + log.Fatalf("server Shutdown Failed:%+s", err) + } + + if err == http.ErrServerClosed { + err = nil + } +} diff --git a/demo/README.md b/demo/README.md new file mode 100644 index 00000000..10595094 --- /dev/null +++ b/demo/README.md @@ -0,0 +1,46 @@ +# Demo client-server + +This contains some example code to determine how long "failovers" are taking within kube-vip, the server component should live within the cluster and the client should be externally. + +## Deploy the server + +Simply apply the manifest to a working cluster that has kube-vip deployed: + +``` +kubectl apply -f ./demo/server/deploy.yaml +``` + +Retrieve the loadBalancer IP that is fronting the service: + +``` +kubectl get svc demo-service +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +demo-service LoadBalancer 10.104.18.147 192.168.0.217 10002:32529/UDP 117m +``` + +## Connect the client + +From elsewhere, clone the kube-vip repository and connect the client to the server endpoint (loadBalancer IP) with the following command: + +``` +go run ./demo/client/main.go -address= +``` + +You will only see output when the client has reconcilled the connection to a pod beneath the service, where it will print the timestamp to reconnection along with the time in milliseconds it took: + +``` +15:58:35.916952 3008 +15:58:45.947506 2005 +15:58:57.983151 3007 +15:59:08.013450 2005 +15:59:20.046491 3008 +15:59:30.076341 2507 +15:59:42.110747 3008 +``` + +## Kill some pods to test + +On a machine or control plane that has `kubectl` and has the credentials to speak to the cluster we will run a command to find the demo pod and kill it every 10 seconds: + +`while true ; do kubectl delete pod $(kubectl get pods | grep -v NAME | grep vip| awk '{ print $1 }'); sleep 10; done` + diff --git a/demo/client/main.go b/demo/client/main.go new file mode 100644 index 00000000..2a4e9e9c --- /dev/null +++ b/demo/client/main.go @@ -0,0 +1,71 @@ +package main + +import ( + "bufio" + "flag" + "fmt" + "net" + "time" +) + +const udpdata = "a3ViZS12aXAK=kube-vip" + +func main() { + address := flag.String("address", "127.0.0.1", "The address of the server") + port := flag.Int("port", 10002, "the port of the server") + interval := flag.Float64("interval", 1000, "Interval in milliseconds") + flag.Parse() + var errorTime time.Time + var errorOccurred bool + for { + p := make([]byte, 2048) + conn, err := net.Dial("udp", fmt.Sprintf("%s:%d", *address, *port)) + if err != nil { + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + continue + } + + err = conn.SetDeadline(time.Now().Add(time.Duration(*interval) * time.Millisecond)) + if err != nil { + //fmt.Printf("Connectivity error [%v]", err) + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + if err = conn.Close(); err != nil { + fmt.Printf("Error closing connection [%v]", err) + } + continue + } + + _, err = fmt.Fprint(conn, udpdata) + if err != nil { + fmt.Printf("Error writing data [%v]", err) + } + + _, err = 
bufio.NewReader(conn).Read(p) + if err != nil { + //fmt.Printf("read error %v\n", err) + if !errorOccurred { + errorTime = time.Now() + errorOccurred = true + } + if err = conn.Close(); err != nil { + fmt.Printf("Error closing connection [%v]", err) + } + continue + } + time.Sleep(time.Duration(*interval) * time.Millisecond) + if errorOccurred { + finishTime := time.Since(errorTime) + //fmt.Printf("connectivity reconciled in %dms\n", finishTime.Milliseconds()) + //t :=time.Now().Format("15:04:05.000000") + fmt.Printf("%s %d\n", time.Now().Format("15:04:05.000000"), finishTime.Milliseconds()) + + errorOccurred = false + } + } +} diff --git a/demo/go.mod b/demo/go.mod deleted file mode 100644 index a9427e42..00000000 --- a/demo/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/plunder-app/kube-vip/demo - -go 1.13 diff --git a/demo/Dockerfile b/demo/server/Dockerfile similarity index 90% rename from demo/Dockerfile rename to demo/server/Dockerfile index a11a5074..96e85ad6 100644 --- a/demo/Dockerfile +++ b/demo/server/Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:experimental -FROM golang:1.13-alpine as dev +FROM golang:1.19-alpine as dev RUN apk add --no-cache git ca-certificates RUN adduser -D appuser COPY main.go /src/ @@ -13,4 +13,4 @@ RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ FROM scratch COPY --from=dev /src/demo / -CMD ["/demo"] \ No newline at end of file +CMD ["/demo"] diff --git a/demo/Makefile b/demo/server/Makefile similarity index 94% rename from demo/Makefile rename to demo/server/Makefile index 216c881a..d02d5e3d 100644 --- a/demo/Makefile +++ b/demo/server/Makefile @@ -45,7 +45,7 @@ fmt: @gofmt -l -w $(SRC) docker: - @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . 
@echo New Multi Architecture Docker image created simplify: diff --git a/demo/deploy.yaml b/demo/server/deploy.yaml similarity index 58% rename from demo/deploy.yaml rename to demo/server/deploy.yaml index cb29e4e4..19327764 100644 --- a/demo/deploy.yaml +++ b/demo/server/deploy.yaml @@ -6,7 +6,7 @@ metadata: app: kube-vip-demo name: kube-vip-demo spec: - replicas: 3 + replicas: 1 selector: matchLabels: app: kube-vip-demo @@ -30,4 +30,25 @@ spec: ports: - containerPort: 10001 - containerPort: 10002 -status: {} \ No newline at end of file +status: {} +--- +apiVersion: v1 +kind: Service +metadata: + name: demo-service + namespace: default + labels: + app: demo-service + annotations: + kube-vip.io/egress: "true" +spec: + type: LoadBalancer + # "Local" preserves the client source IP and avoids a second hop for + # LoadBalancer and NodePort + externalTrafficPolicy: Local + ports: + - name: demo-udp + port: 10002 + protocol: UDP + selector: + app: kube-vip-demo \ No newline at end of file diff --git a/demo/server/go.mod b/demo/server/go.mod new file mode 100644 index 00000000..02df02d9 --- /dev/null +++ b/demo/server/go.mod @@ -0,0 +1,3 @@ +module github.com/kube-vip/kube-vip/demo + +go 1.13 diff --git a/demo/main.go b/demo/server/main.go similarity index 97% rename from demo/main.go rename to demo/server/main.go index 24aab371..89c2928d 100644 --- a/demo/main.go +++ b/demo/server/main.go @@ -60,7 +60,7 @@ func main() { fmt.Println("error: ", err) } - ServerConn.WriteTo(buf[0:n], addr) + ServerConn.WriteTo(buf[0:n]) } } } diff --git a/example/deploy/0.1.2.yaml b/example/deploy/0.1.2.yaml index 4e5c5a2f..2f6caa31 100644 --- a/example/deploy/0.1.2.yaml +++ b/example/deploy/0.1.2.yaml @@ -28,7 +28,7 @@ spec: - kube-vip-cluster topologyKey: "kubernetes.io/hostname" containers: - - image: plndr/kube-vip:0.1.2 + - image: ghcr.io/kube-vip/kube-vip:0.3.7 imagePullPolicy: Always name: kube-vip command: diff --git a/example/deploy/0.1.3.yaml b/example/deploy/0.1.3.yaml index a693e6a1..b76bf985 100644 --- a/example/deploy/0.1.3.yaml +++ b/example/deploy/0.1.3.yaml @@ -58,7 +58,7 @@ spec: - kube-vip-cluster topologyKey: "kubernetes.io/hostname" containers: - - image: plndr/kube-vip:0.1.3 + - image: ghcr.io/kube-vip/kube-vip:0.3.7 imagePullPolicy: Always name: kube-vip command: diff --git a/example/deploy/0.3.5.yaml b/example/deploy/0.3.5.yaml new file mode 100644 index 00000000..a93da17f --- /dev/null +++ b/example/deploy/0.3.5.yaml @@ -0,0 +1,55 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: kube-vip-ds + namespace: kube-system +spec: + selector: + matchLabels: + name: kube-vip-ds + template: + metadata: + creationTimestamp: null + labels: + name: kube-vip-ds + spec: + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: vip_interface + value: eth0 + - name: port + value: "6443" + - name: vip_cidr + value: "32" + - name: svc_enable + value: "true" + - name: vip_startleader + value: "false" + - name: vip_addpeerstolb + value: "true" + - name: vip_localpeer + value: ip-172-20-40-207:172.20.40.207:10000 + - name: vip_address + image: plndr/kube-vip:v0.3.5 + imagePullPolicy: Always + name: kube-vip + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_TIME + hostNetwork: true + serviceAccountName: kube-vip + updateStrategy: {} +status: + currentNumberScheduled: 0 + desiredNumberScheduled: 0 + numberMisscheduled: 0 + numberReady: 0 diff --git a/go.mod b/go.mod index ec9e3421..02b6f8fd 100644 --- a/go.mod 
+++ b/go.mod @@ -1,60 +1,134 @@ -module github.com/plunder-app/kube-vip +module github.com/kube-vip/kube-vip -go 1.14 +go 1.21 + +toolchain go1.21.3 require ( - github.com/armon/go-metrics v0.3.4 // indirect + github.com/cloudflare/ipvs v0.10.1 github.com/davecgh/go-spew v1.1.1 + github.com/florianl/go-conntrack v0.4.0 + github.com/golang/protobuf v1.5.3 + github.com/google/go-cmp v0.6.0 + github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c + github.com/jpillora/backoff v1.0.0 + github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08 + github.com/mdlayher/ndp v1.0.1 + github.com/onsi/ginkgo/v2 v2.15.0 + github.com/onsi/gomega v1.30.0 + github.com/osrg/gobgp/v3 v3.22.0 + github.com/packethost/packngo v0.31.0 + github.com/pkg/errors v0.9.1 + github.com/prometheus/client_golang v1.18.0 + github.com/sirupsen/logrus v1.9.3 + github.com/spf13/cobra v1.8.0 + github.com/stretchr/testify v1.8.4 + github.com/vishvananda/netlink v1.2.1-beta.2 + go.etcd.io/etcd/api/v3 v3.5.11 + go.etcd.io/etcd/client/pkg/v3 v3.5.11 + go.etcd.io/etcd/client/v3 v3.5.11 + go.uber.org/zap v1.26.0 + golang.org/x/exp v0.0.0-20231005195138-3e424a577f31 + golang.org/x/sys v0.16.0 + golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 + gopkg.in/yaml.v2 v2.4.0 + k8s.io/api v0.29.1 + k8s.io/apimachinery v0.29.1 + k8s.io/client-go v0.29.1 + k8s.io/klog/v2 v2.120.1 + sigs.k8s.io/kind v0.20.0 + sigs.k8s.io/yaml v1.4.0 +) + +require ( + github.com/BurntSushi/toml v1.3.2 // indirect + github.com/alessio/shellescape v1.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/digineo/go-dhclient v1.0.2 + github.com/eapache/channels v1.1.0 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/fatih/color v1.9.0 // indirect - github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v0.2.1 // indirect - github.com/golang/protobuf v1.4.2 - github.com/google/go-cmp v0.5.2 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect + github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/gopacket v1.1.18 // indirect - github.com/google/uuid v1.1.2 // indirect - github.com/googleapis/gnostic v0.5.1 // indirect - github.com/hashicorp/go-hclog v0.14.1 // indirect - github.com/hashicorp/go-immutable-radix v1.2.0 // indirect - github.com/hashicorp/go-msgpack v1.1.5 // indirect - github.com/hashicorp/go-retryablehttp v0.6.7 // indirect - github.com/hashicorp/golang-lru v0.5.4 // indirect - github.com/hashicorp/raft v1.1.2 - github.com/imdario/mergo v0.3.11 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/google/uuid v1.3.1 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/imdario/mergo v0.3.12 // indirect + 
github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/josharian/native v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/k-sone/critbitgo v1.4.0 // indirect - github.com/magiconair/properties v1.8.3 // indirect - github.com/mattn/go-colorable v0.1.7 // indirect - github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 // indirect - github.com/mitchellh/mapstructure v1.3.3 // indirect - github.com/osrg/gobgp v2.0.0+incompatible - github.com/packethost/packngo v0.3.0 - github.com/pelletier/go-toml v1.8.1 // indirect - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.6.0 - github.com/spf13/afero v1.4.0 // indirect - github.com/spf13/cast v1.3.1 // indirect - github.com/spf13/cobra v1.0.0 + github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/mattn/go-isatty v0.0.14 // indirect + github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mdlayher/genetlink v1.3.2 // indirect + github.com/mdlayher/netlink v1.7.2 // indirect + github.com/mdlayher/packet v1.1.2 // indirect + github.com/mdlayher/socket v0.4.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pierrec/lz4/v4 v4.1.18 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.45.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cast v1.5.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect - github.com/spf13/viper v1.7.1 // indirect - github.com/vishvananda/netlink v1.1.0 - github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae // indirect - golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a // indirect - golang.org/x/net v0.0.0-20200904194848-62affa334b73 - golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 // indirect - golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20 // indirect - golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect - google.golang.org/genproto v0.0.0-20200917134801-bb4cff56e0d0 // indirect - google.golang.org/grpc v1.32.0 // indirect - gopkg.in/ini.v1 v1.61.0 // indirect - k8s.io/api v0.19.2 - k8s.io/apimachinery v0.19.2 - k8s.io/client-go v0.19.0 - k8s.io/klog v1.0.0 - k8s.io/klog/v2 v2.3.0 // indirect - k8s.io/utils v0.0.0-20200912215256-4140de9c8800 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.16.0 // indirect + github.com/subosito/gotenv v1.4.2 // indirect + github.com/tj/go-spin v1.1.0 // indirect + github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect + github.com/vishvananda/netns v0.0.4 // indirect + github.com/xlab/c-for-go v0.0.0-20230906092656-a1822f0a09c1 // indirect + github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 // indirect + go.uber.org/multierr v1.10.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/oauth2 
v0.12.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.16.1 // indirect + golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + modernc.org/cc/v4 v4.1.0 // indirect + modernc.org/mathutil v1.5.0 // indirect + modernc.org/opt v0.1.3 // indirect + modernc.org/strutil v1.1.3 // indirect + modernc.org/token v1.0.1 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) - -replace github.com/osrg/gobgp v2.0.0+incompatible => github.com/osrg/gobgp v0.0.0-20191101114856-a42a1a5f6bf0 diff --git a/go.sum b/go.sum index a30c30cc..4bbbffa9 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,10 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -14,6 +14,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -22,7 +25,6 @@ cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4g cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= 
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -32,132 +34,95 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= 
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= -github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= -github.com/armon/go-metrics v0.3.4 h1:Xqf+7f2Vhl9tsqDYmXhnXInUdcrtgpRNpIA15/uldSc= -github.com/armon/go-metrics v0.3.4/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/cilium/ebpf v0.5.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/ipvs v0.10.1 
h1:rRP+HtkATKJd62iGulRq3hg+oh/kJyttFLizGO4aB3I= +github.com/cloudflare/ipvs v0.10.1/go.mod h1:HBnpmdbOqfYEz7Qkix+IdMfu+7lnPL/5o8iWTXrc5ZQ= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20171119141306-ac7624ea8da3/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/digineo/go-dhclient v1.0.2 h1:69ZRY+AZnAx+BjO7UWTYWCGgnxW6oBOGwJXgciuLSEU= -github.com/digineo/go-dhclient v1.0.2/go.mod h1:DPvyqGEW8irJvp2lrnGfQWpjj6VidXX9STLBTfNing4= -github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= +github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/dustin/go-humanize v1.0.0 
h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k= github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0= -github.com/eapache/queue v1.0.2/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= -github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fsnotify/fsnotify v1.4.2/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/florianl/go-conntrack v0.4.0 h1:TlYkxytdwgVayfU0cKwkHurQA0Rd1ZSEBRckRYDUu18= +github.com/florianl/go-conntrack v0.4.0/go.mod h1:iPDx4oIats2T7X7Jm3PFyRCJM1GfZhJaSHOWROYOrE8= +github.com/frankban/quicktest v1.11.3/go.mod 
h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= -github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.1.0 
h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -165,10 +130,8 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -179,33 +142,35 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17 h1:rMrlX2ZY2UbvT+sdz3+6J+pp2z+msCq9MxTU6ymxbBY= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= -github.com/google/gopacket v1.1.18 h1:lum7VRA9kdlvBi7/v2p7/zcbkduHaCH/SVVyurs7OpY= -github.com/google/gopacket v1.1.18/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -213,310 +178,238 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod 
h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= +github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= -github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= 
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.1 h1:9PZfAcVEvez4yhLH2TBU64/h/z4xlFI80cWXRrxuKuM= -github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-hclog v0.14.1 h1:nQcJDQwIAGnmoUWp8ubocEX40cCml/17YkF6csQLReU= -github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.2.0 h1:l6UW37iCXwZkZoAbEYnptSHVE/cQ5bOTPYG5W3vf9+8= -github.com/hashicorp/go-immutable-radix v1.2.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= -github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v1.1.5 h1:9byZdVjKTe5mce63pRVNP1L7UAmdHOTEMGehn6KvJWs= -github.com/hashicorp/go-msgpack v1.1.5/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= -github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-retryablehttp v0.6.7 h1:8/CAEZt/+F7kR7GevNHulKkUjLht3CPmn7egmhieNKo= -github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v0.0.0-20170509225359-392dba7d905e/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl 
v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/raft v1.1.2 h1:oxEL5DDeurYxLd3UbcY/hccgSPhLLpiBZ1YxtWEq59c= -github.com/hashicorp/raft v1.1.2/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714 h1:/jC7qQFrv8CrSJVmaolDVOxTfS9kc36uB6H40kdbQq8= +github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/jessevdk/go-flags v1.3.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c h1:P/3mFnHCv1A/ej4m8pF5EB6FUt9qEL2Q9lfrcUNwCYs= +github.com/insomniacslk/dhcp v0.0.0-20230731140434-0f9eb93a696c/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod 
h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA= +github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= +github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= +github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= +github.com/jsimonetti/rtnetlink v0.0.0-20201216134343-bde56ed16391/go.mod h1:cR77jAZG3Y3bsb8hF6fHJbFoyFukLFOkQ98S0pQz3xw= +github.com/jsimonetti/rtnetlink v0.0.0-20201220180245-69540ac93943/go.mod h1:z4c53zj6Eex712ROyh8WI0ihysb5j2ROyV42iNogmAs= +github.com/jsimonetti/rtnetlink v0.0.0-20210122163228-8d122574c736/go.mod h1:ZXpIyOK59ZnN7J0BV99cZUPmsqDRZ3eq5X+st7u/oSA= +github.com/jsimonetti/rtnetlink v0.0.0-20210212075122-66c871082f2b/go.mod h1:8w9Rh8m+aHZIG69YPGGem1i5VzoyRC8nw2kA8B+ik5U= +github.com/jsimonetti/rtnetlink v0.0.0-20210525051524-4cc836578190/go.mod h1:NmKSdU4VGSiv1bMsdqNALI4RSvvjtz65tTMCnD05qLo= +github.com/jsimonetti/rtnetlink v0.0.0-20211022192332-93da33804786/go.mod h1:v4hqbTdfQngbVSZJVWUhGE/lbTFf9jb+ygmNUDQMuOs= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/k-sone/critbitgo v1.3.1-0.20191024122315-48c9e1530131 h1:2bjzgZk4GiWAFkj15/SkmxIO30u69RyPiSS+F0d+Kzs= -github.com/k-sone/critbitgo v1.3.1-0.20191024122315-48c9e1530131/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s= github.com/k-sone/critbitgo v1.4.0 h1:l71cTyBGeh6X5ATh6Fibgw3+rtNT80BA0uNNWgkPrbE= github.com/k-sone/critbitgo v1.4.0/go.mod h1:7E6pyoyADnFxlUBEKcnfS49b7SUAQGMK+OAp/UQvo0s= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08 h1:UQlM3K8NSN3cqIsICAQnSVOQe9B4LyFEu/xJUr+Scn4= +github.com/kamhlos/upnp v0.0.0-20210324072331-5661950dff08/go.mod h1:0L/S1RSG4wA4M2Vhau3z7VsYMLxFnsX0bzzgwYRIdYU= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= 
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/magiconair/properties v1.7.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.3 h1:kJSsc6EXkBLgr3SphHk9w5mtjn0bjlR4JYEXKrJ45rQ= -github.com/magiconair/properties v1.8.3/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7 h1:bQGKb3vps/j0E9GfJQ03JyhRuxsvdAanXlT9BTw3mdw= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mdlayher/raw v0.0.0-20191004140158-e1402808046b h1:8Oryv4wHvBHAxi9Swzu1zyN4BFrJKvv5pOnN0scSTw8= -github.com/mdlayher/raw v0.0.0-20191004140158-e1402808046b/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065 h1:aFkJ6lx4FPip+S+Uw4aTegFMct9shDvP+79PsSxpm3w= -github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mitchellh/cli v1.0.0/go.mod 
h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= +github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mdlayher/ethtool v0.0.0-20210210192532-2b88debcdd43/go.mod h1:+t7E0lkKfbBsebllff1xdTmyJt8lH37niI6kwFk9OTo= +github.com/mdlayher/ethtool v0.0.0-20211028163843-288d040e9d60/go.mod h1:aYbhishWc4Ai3I2U4Gaa2n3kHWSwzme6EsG/46HRQbE= +github.com/mdlayher/genetlink v1.0.0/go.mod h1:0rJ0h4itni50A86M2kHcgS85ttZazNt7a8H2a2cw0Gc= +github.com/mdlayher/genetlink v1.3.2 h1:KdrNKe+CTu+IbZnm/GVUMXSqBBLqcGpRDa0xkQy56gw= +github.com/mdlayher/genetlink v1.3.2/go.mod h1:tcC3pkCrPUGIKKsCsp0B3AdaaKuHtaxoJRz3cc+528o= +github.com/mdlayher/ndp v1.0.1 h1:+yAD79/BWyFlvAoeG5ncPS0ItlHP/eVbH7bQ6/+LVA4= +github.com/mdlayher/ndp v1.0.1/go.mod h1:rf3wKaWhAYJEXFKpgF8kQ2AxypxVbfNcZbqoAo6fVzk= +github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA= +github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M= +github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY= +github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= +github.com/mdlayher/netlink v1.2.0/go.mod h1:kwVW1io0AZy9A1E2YYgaD4Cj+C+GPkU6klXCMzIJ9p8= +github.com/mdlayher/netlink v1.2.1/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.2.2-0.20210123213345-5cc92139ae3e/go.mod h1:bacnNlfhqHqqLo4WsYeXSqfyXkInQ9JneWI68v1KwSU= +github.com/mdlayher/netlink v1.3.0/go.mod h1:xK/BssKuwcRXHrtN04UBkwQ6dY9VviGGuriDdoPSWys= +github.com/mdlayher/netlink 
v1.4.0/go.mod h1:dRJi5IABcZpBD2A3D0Mv/AiX8I9uDEu5oGkAVrekmf8= +github.com/mdlayher/netlink v1.4.1/go.mod h1:e4/KuJ+s8UhfUpO9z00/fDZZmhSrs+oxyqAS9cNgn6Q= +github.com/mdlayher/netlink v1.5.0/go.mod h1:1Kr8BBFxGyUyNmztC9WLOayqYVAd2wsgOZm18nqGuzQ= +github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g= +github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw= +github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY= +github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4= +github.com/mdlayher/socket v0.0.0-20210307095302-262dc9984e00/go.mod h1:GAFlyu4/XV68LkQKYzKhIo/WW7j3Zi0YRAz/BOoanUc= +github.com/mdlayher/socket v0.0.0-20211007213009-516dcbdf0267/go.mod h1:nFZ1EtZYK8Gi/k6QNu7z7CgO20i/4ExeQswwWuPmG/g= +github.com/mdlayher/socket v0.1.0/go.mod h1:mYV5YIZAfHh4dzDVzI8x8tWLWCliuX8Mon5Awbj+qDs= +github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U= +github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721 h1:RlZweED6sbSArvlE924+mUcZuXKLBHA35U7LN621Bws= +github.com/mikioh/ipaddr v0.0.0-20190404000644-d465c8ab6721/go.mod h1:Ickgr2WtCLZ2MDGd4Gr0geeCH5HybhRJbonOgQpvSxc= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/osrg/gobgp v0.0.0-20191101114856-a42a1a5f6bf0 h1:sqWev+JAbQevLXYorfyTzf1c5VubkPi+Q83O9oBCNgA= -github.com/osrg/gobgp v0.0.0-20191101114856-a42a1a5f6bf0/go.mod h1:IVw8wEHROhX0qrmI8c6j3N8EDXZSC4YkktSzkX/JZ8Q= -github.com/packethost/packngo v0.3.0 
h1:mE5UHyhr5sKN1Qa0GtExRG9ECUX/muazI0f53gSrt5E= -github.com/packethost/packngo v0.3.0/go.mod h1:aRxUEV1TprXVcWr35v8tNYgZMjv7FHaInXx224vF2fc= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= -github.com/pelletier/go-toml v1.0.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.8.1 h1:1Nf83orprkJyknT6h7zbuEGUEjcyVlCxSUGTENmNCRM= -github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= +github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= +github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= +github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= +github.com/osrg/gobgp/v3 v3.22.0 h1:HKCk9+8hV5GQ4c35NuV8q+eKSnsScf+0v7oXB6jS8wU= +github.com/osrg/gobgp/v3 v3.22.0/go.mod h1:4fbscYpsCk14EO16nTWAdJyErO4MbAZ2zLJmsmeXu/k= +github.com/packethost/packngo v0.31.0 h1:LLH90ardhULWbagBIc3I3nl2uU75io0a7AwY6hyi0S4= +github.com/packethost/packngo v0.31.0/go.mod h1:Io6VJqzkiqmIEQbpOjeIw9v8q9PfcTEq8TEY/tMQsfw= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.18 h1:xaKrnTkyoqfh1YItXl56+6KJNVYWlEEPuAQW9xsplYQ= +github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= +github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= +github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= +github.com/prometheus/procfs v0.12.0 
h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v0.0.0-20170713114250-a3f95b5c4235/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v0.0.0-20170217164146-9be650865eab/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/afero v1.4.0 h1:jsLTaI1zwYO3vjrzHalkVcIHXTNmdQFepW4OI8H3+x8= -github.com/spf13/afero v1.4.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.1.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.0-20170731170427-b26b538f6930/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/jwalterweatherman v0.0.0-20170523133247-0efa5202c046/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= 
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= +github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.0.0/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk= -github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= +github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= -github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/vishvananda/netlink v0.0.0-20170802012344-a95659537721/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= -github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= -github.com/vishvananda/netns v0.0.0-20170707011535-86bef332bfc3/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k= -github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae h1:4hwBBUfQCFe3Cym0ZtKyq7L16eZUtYKs+BaHDN6mAns= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= +github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg= +github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264= +github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= +github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= +github.com/xlab/c-for-go v0.0.0-20230906092656-a1822f0a09c1 h1:d9k72yL7DUmIZJPaqsh+mMWlKOfv+drGA2D8I55SnjA= +github.com/xlab/c-for-go v0.0.0-20230906092656-a1822f0a09c1/go.mod h1:NYjqfg762bzbQeElSH5apzukcCvK3Vxa8pA2jci6T4s= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245 
h1:Sw125DKxZhPUI4JLlWugkzsrlB50jR9v2khiD9FxuSo= +github.com/xlab/pkgconfig v0.0.0-20170226114623-cea12a0fd245/go.mod h1:C+diUUz7pxhNY6KAoLgrTYARGWnt82zWTylZlxT92vk= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.etcd.io/etcd/api/v3 v3.5.11 h1:B54KwXbWDHyD3XYAwprxNzTe7vlhR69LuBgZnMVvS7E= +go.etcd.io/etcd/api/v3 v3.5.11/go.mod h1:Ot+o0SWSyT6uHhA56al1oCED0JImsRiU9Dc26+C2a+4= +go.etcd.io/etcd/client/pkg/v3 v3.5.11 h1:bT2xVspdiCj2910T0V+/KHcVKjkUrCZVtk8J2JF2z1A= +go.etcd.io/etcd/client/pkg/v3 v3.5.11/go.mod h1:seTzl2d9APP8R5Y2hFL3NVlD6qC/dOT+3kvrqPyTas4= +go.etcd.io/etcd/client/v3 v3.5.11 h1:ajWtgoNSZJ1gmS8k+icvPtqsqEav+iUorF7b0qozgUU= +go.etcd.io/etcd/client/v3 v3.5.11/go.mod h1:a6xQUEqFJ8vztO1agJh/KQKOMfFI8og52ZconzcDJwE= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= +go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= +go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a h1:y6sBfNd1b9Wy08a6K1Z1DZc4aXABUN5TKjkYhz7UKmo= golang.org/x/crypto 
v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -527,6 +420,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20231005195138-3e424a577f31 h1:9k5exFQKQglLo+RoP+4zMjOFE14P6+vyR0baDAi0Rcs= +golang.org/x/exp v0.0.0-20231005195138-3e424a577f31/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -539,6 +434,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -547,27 +443,26 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191003171128-d98b1b443823/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -580,20 +475,37 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= +golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201216054612-986b41b23924/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z31v8HwR2A9FYOuAhWqkwc= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.12.0 h1:smVPGxink+n1ZI5pkQa8y6fZT0RW0MgCO5bFpepy4B4= +golang.org/x/oauth2 v0.12.0/go.mod h1:A74bZ3aGXgCY0qaIC9Ahg6Lglin4AMAco8cIv9baba4= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -602,36 +514,27 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9 h1:L2auWcuQIvxz9xSEqzESnV/QN/gNRXNApHi3fYwl2w0= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -644,35 +547,63 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20 h1:4X356008q5SA3YXu8PiRap39KFmy4Lf6sGlceJKZQsU= -golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201118182958-a01c418693c7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210110051926-789bb1bd4061/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210123111255-9b0068b26619/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210216163648-f7da38b97c65/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190424220101-1e8e1cfdf96b/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= @@ -680,11 +611,8 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190826182127-07722704da13/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191026034945-b2104f82a97d/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -707,14 +635,29 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= +golang.org/x/tools v0.16.1 h1:TLyB3WofjdOEepBHAU20JdNC1Zbg87elYofWYAY5oZA= +golang.org/x/tools v0.16.1/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo= +golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6 h1:CawjfCvYQH2OU3/TnxLx97WDSUDRABfT18pCOYwc2GE= +golang.zx2c4.com/wireguard/wgctrl v0.0.0-20230429144221-925a1e7659e6/go.mod h1:3rxYc4HtVcSG9gVaTs2GEBdehh+sYPOwKtyUWEOTb80= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= 
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -731,18 +674,19 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7 h1:ZUjXAXmrAyrmmCPHgCA/vChHcpsX27MZ3yBonD/z1KE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -765,32 +709,42 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto 
v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200917134801-bb4cff56e0d0 h1:uslsjIdqvZYANxSBQjTI47vZfwMaTN3mLELkMnMIY/A= -google.golang.org/genproto v0.0.0-20200917134801-bb4cff56e0d0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.5.1/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d h1:VBu5YqKPv6XiJ199exd8Br+Aetz+o08F+PLMnwJQHAY= +google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d h1:uvYuEyMHKNt+lT4K3bN6fGswmK8qSvcreM3BwjDh+y4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0 h1:zWTV+LMdc3kaiJMSTOFz2UgSBgx8RNQoTGiZu3fR9S0= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod 
h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -799,75 +753,74 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.61.0 h1:LBCdW4FmFYL4s/vDZD1RQYX7oAR6IjujCYgMdbHBR10= -gopkg.in/ini.v1 v1.61.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod 
h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.0.0-20170721122051-25c4ec802a7d/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= +gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20191022112108-ee025456fe28/go.mod h1:YZLKf07TTEX58hlaDFZRbZEdP4uwSiqhU91o1aN3EvM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= -k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= -k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= -k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= -k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k= -k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= -k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco= -k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +k8s.io/api v0.29.1 h1:DAjwWX/9YT7NQD4INu49ROJuZAAAP/Ijki48GUPzxqw= +k8s.io/api v0.29.1/go.mod h1:7Kl10vBRUXhnQQI8YR/R327zXC8eJ7887/+Ybta+RoQ= +k8s.io/apimachinery v0.29.1 h1:KY4/E6km/wLBguvCZv8cKTeOwwOBqFNjwJIdMkMbbRc= +k8s.io/apimachinery v0.29.1/go.mod h1:6HVkd1FwxIagpYrHSwJlQqZI3G9LfYWRPAkUvLnXTKU= +k8s.io/client-go v0.29.1 h1:19B/+2NGEwnFLzt0uB5kNJnfTsbV8w6TgQRz9l7ti7A= +k8s.io/client-go v0.29.1/go.mod h1:TDG/psL9hdet0TI9mGyHJSgRkW3H9JZk2dNEUS7bRks= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= +k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +modernc.org/cc/v4 v4.1.0 h1:PlApAKux1sNvreOGs1Hr04FFz35QmAWoa98YFjcdH94= +modernc.org/cc/v4 v4.1.0/go.mod h1:T6KFXc8WI0m9k6IOHuRe9+vB+Pb/AaV8BMZoVqHLm1I= +modernc.org/ccorpus2 v1.1.0 h1:r/Z2+wOD5Tmcs1AMVXJgslE9HgRRROVWo0qUox1kJIo= +modernc.org/ccorpus2 v1.1.0/go.mod h1:Wifvo4Q/qS/h1aRoC2TffcHsnxwTikmi1AuLANuucJQ= +modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/token v1.0.1 h1:A3qvTqOwexpfZZeyI0FeGPDlSWX5pjZu9hF4lU+EKWg= +modernc.org/token v1.0.1/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= 
-sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/kind v0.20.0 h1:f0sc3v9mQbGnjBUaqSFST1dwIuiikKVGgoTwpoP33a8=
+sigs.k8s.io/kind v0.20.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
+sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
+sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/kube-vip.png b/kube-vip.png
new file mode 100644
index 00000000..f2f7bf96
Binary files /dev/null and b/kube-vip.png differ
diff --git a/kubernetes-control-plane.md b/kubernetes-control-plane.md
deleted file mode 100644
index 0bf508bc..00000000
--- a/kubernetes-control-plane.md
+++ /dev/null
@@ -1,147 +0,0 @@
-# Load Balancing a Kubernetes Cluster (Control-Plane)
-
-This document covers all of the details for using `kube-vip` to build a HA Kubernetes cluster
-
-`tl;dr version`
-- Generate/modify first node `kube-vip` config/manifest
-- `init` first node
-- `join` remaining nodes
-- Add remaining config/manifests
-
-## Infrastructure architecture
-
-The infrastructure for our example HA Kubernetes cluster is as follows:
-
-| Node           | Address    |
-|----------------|------------|
-| VIP            | 10.0.0.75  |
-| controlPlane01 | 10.0.0.70  |
-| controlPlane02 | 10.0.0.71  |
-| controlPlane03 | 10.0.0.72  |
-
-All nodes are running Ubuntu 18.04, Docker CE and will use Kubernetes 1.17.0.
-
-### Generate the `kube-vip` configuration
-
-Make sure that the config directory exists: `sudo mkdir -p /etc/kube-vip/`, this directory can be any directory however the `hostPath` in the manifest will need modifying to point to the correct path.
-
-```
-sudo docker run -it --rm plndr/kube-vip:0.1.5 sample config | sudo tee /etc/kube-vip/config.yaml
-```
-
-### Modify the configuration
-
-**Cluster Configuration**
-Modify the `remotePeers` to point to the correct addresses of the other two nodes, ensure that their `id` is unique otherwise this will confuse the raft algorithm. The `localPeer` should be the configuration of the current node (`controlPlane01`), which is where this instance of the cluster will run.
-
-As this node will be the first node, it will need to elect itself leader as until this occurs the VIP won’t be activated!
-
-`startAsLeader: true`
-
-**VIP Config**
-We will need to set our VIP address to `192.168.0.75` and to ensure all hosts are updated when the VIP moves we will enable ARP broadcasts `gratuitousARP: true`
-
-**Load Balancer**
-We will configure the load balancer to sit on the standard API-Server port `6443` and we will configure the backends to point to the API-servers that will be configured to run on port `6444`. Also for the Kubernetes Control Plane we will configure the load balancer to be of `type: tcp`.
-
-We can also use `6443` for both the VIP and the API-Servers, in order to do this we need to specify that the api-server is bound to it's local IP. To do this we use the `--apiserver-advertise-address` flag as part of the `init`, this means that we can then bind the same port to the VIP and we wont have a port conflict.
-
-**config.yaml**
-
-`user@controlPlane01:/etc/kube-vip$ cat config.yaml`
-
-...
-
-```
-remotePeers:
-- id: server2
-  address: 192.168.0.71
-  port: 10000
-- id: server3
-  address: 192.168.0.72
-  port: 10000
-localPeer:
-  id: server1
-  address: 192.168.0.70
-  port: 10000
-vip: 192.168.0.75
-gratuitousARP: true
-singleNode: false
-startAsLeader: true
-interface: ens192
-loadBalancers:
-- name: Kubernetes Control Plane
-  type: tcp
-  port: 6443
-  bindToVip: true
-  backends:
-  - port: 6444
-    address: 192.168.0.70
-  - port: 6444
-    address: 192.168.0.71
-  - port: 6444
-    address: 192.168.0.72
-```
-
-### First Node
-
-To generate the basic Kubernetes static pod `yaml` configuration:
-
-Make sure that the manifest directory exists: `sudo mkdir -p /etc/kubernetes/manifests/`
-
-```
-sudo docker run -it --rm plndr/kube-vip:0.1.5 sample manifest | sudo tee /etc/kubernetes/manifests/kube-vip.yaml
-```
-
-Ensure that `image: plndr/kube-vip:` is modified to point to a specific version (`0.1.5` at the time of writing), refer to [docker hub](https://hub.docker.com/r/plndr/kube-vip/tags) for details. Also ensure that the `hostPath` points to the correct `kube-vip` configuration, if it isn’t the above path.
-
-The **vip** is set to `192.168.0.75` and this first node will elect itself as leader, and as part of the `kubeadm init` it will use the VIP in order to speak back to the initialising api-server.
-
-`sudo kubeadm init --control-plane-endpoint “192.168.0.75:6443” --apiserver-bind-port 6444 --upload-certs --kubernetes-version “v1.17.0”`
-
-Once this node is up and running we will be able to see the control-plane pods, including the `kube-vip` pod:
-
-```
-$ kubectl get pods -A
-NAMESPACE NAME READY STATUS RESTARTS AGE
-<...>
-kube-system kube-vip-controlplane01 1/1 Running 0 10m
-```
-
-### Remaining Nodes
-
-We first will need to create the `kube-vip` configuration that resides in `/etc/kube-vip/config.yaml` or we can regenerate it from scratch using the above example. Ensure that the configuration is almost identical with the `localPeer` and `remotePeers` sections are updated for each node. Finally, ensure that the remaining nodes will behave as standard cluster nodes by setting `startAsLeader: false`.
-
-At this point **DON’T** generate the manifests, this is due to some bizarre `kubeadm/kubelet` behaviour.
-
-```
- kubeadm join 192.168.0.75:6443 --token \
-    --discovery-token-ca-cert-hash sha256: \
-    --control-plane --certificate-key
-
-```
-
-**After** this node has been added to the cluster, we can add the manifest to also add this node as a `kube-vip` member. (Adding the manifest afterwards doesn’t interfere with `kubeadm`).
- -``` -sudo docker run -it --rm plndr/kube-vip:0.1.5 sample manifest | sudo tee /etc/kubernetes/manifests/kube-vip.yaml -``` - -Once this node is added we will be able to see that the `kube-vip` pod is up and running as expected: - -``` -user@controlPlane01:~$ kubectl get pods -A | grep vip -kube-system kube-vip-controlplane01 1/1 Running 1 16m -kube-system kube-vip-controlplane02 1/1 Running 0 18m -kube-system kube-vip-controlplane03 1/1 Running 0 20m - -``` - -If we look at the logs, we can see that the VIP is running on the second node and we’re waiting for our third node to join the cluster: - -``` -$ kubectl logs kube-vip-controlplane02 -n kube-system -time=β€œ2020-02-12T15:33:09Z” level=info msg=β€œThe Node [192.168.0.70:10000] is leading” -time=β€œ2020-02-12T15:33:09Z” level=info msg=β€œThe Node [192.168.0.70:10000] is leading” - -``` diff --git a/main.go b/main.go index 313f5f91..2d66f9cd 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,8 @@ package main -import "github.com/plunder-app/kube-vip/cmd" +import ( + "github.com/kube-vip/kube-vip/cmd" +) // Version is populated from the Makefile and is tied to the release TAG var Version string @@ -9,6 +11,7 @@ var Version string var Build string func main() { + cmd.Release.Version = Version cmd.Release.Build = Build cmd.Execute() diff --git a/pkg/bgp/hosts.go b/pkg/bgp/hosts.go index 2c2f0211..2d283e99 100644 --- a/pkg/bgp/hosts.go +++ b/pkg/bgp/hosts.go @@ -2,9 +2,10 @@ package bgp import ( "context" + "fmt" "net" - api "github.com/osrg/gobgp/api" + api "github.com/osrg/gobgp/v3/api" ) // AddHost will update peers of a host @@ -13,15 +14,20 @@ func (b *Server) AddHost(addr string) (err error) { if err != nil { return err } + p := b.getPath(ip) if p == nil { - return + return fmt.Errorf("failed to get path for %v", ip) } _, err = b.s.AddPath(context.Background(), &api.AddPathRequest{ Path: p, }) + if err != nil { + return err + } + return } diff --git a/pkg/bgp/peers.go b/pkg/bgp/peers.go index f3f24879..e78b9906 100644 --- a/pkg/bgp/peers.go +++ b/pkg/bgp/peers.go @@ -7,93 +7,169 @@ import ( "strconv" "strings" - "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes" //nolint "github.com/golang/protobuf/ptypes/any" - api "github.com/osrg/gobgp/api" + api "github.com/osrg/gobgp/v3/api" ) -//AddPeer will add peers to the BGP configuration +// AddPeer will add peers to the BGP configuration func (b *Server) AddPeer(peer Peer) (err error) { - port := 179 - - if t := strings.SplitN(peer.Address, ":", 2); len(t) == 2 { - peer.Address = t[0] - - if port, err = strconv.Atoi(t[1]); err != nil { - return fmt.Errorf("Unable to parse port '%s' as int: %s", t[1], err) - } - } - p := &api.Peer{ Conf: &api.PeerConf{ NeighborAddress: peer.Address, - PeerAs: peer.AS, + PeerAsn: peer.AS, + AuthPassword: peer.Password, }, Timers: &api.Timers{ Config: &api.TimersConfig{ - ConnectRetry: 10, + ConnectRetry: 10, + HoldTime: b.c.HoldTime, + KeepaliveInterval: b.c.KeepaliveInterval, }, }, + // This enables routes to be sent to routers across multiple hops + EbgpMultihop: &api.EbgpMultihop{ + Enabled: peer.MultiHop, + MultihopTtl: 50, + }, + Transport: &api.Transport{ MtuDiscovery: true, RemoteAddress: peer.Address, - RemotePort: uint32(port), + RemotePort: uint32(179), }, } - // if b.c.SourceIP != "" { - // p.Transport.LocalAddress = b.c.SourceIP - // } + if b.c.SourceIP != "" { + p.Transport.LocalAddress = b.c.SourceIP + } - // if b.c.SourceIF != "" { - // p.Transport.BindInterface = b.c.SourceIF - // } + if b.c.SourceIF != "" { + 
p.Transport.BindInterface = b.c.SourceIF + } return b.s.AddPeer(context.Background(), &api.AddPeerRequest{ Peer: p, }) } -func (b *Server) getPath(ip net.IP) *api.Path { - var pfxLen uint32 = 32 - if ip.To4() == nil { - if !b.c.IPv6 { - return nil - } - - pfxLen = 128 - } +func (b *Server) getPath(ip net.IP) (path *api.Path) { + isV6 := ip.To4() == nil - nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ - Prefix: ip.String(), - PrefixLen: pfxLen, - }) - - a1, _ := ptypes.MarshalAny(&api.OriginAttribute{ + //nolint + originAttr, _ := ptypes.MarshalAny(&api.OriginAttribute{ Origin: 0, }) - var nh string - if b.c.NextHop != "" { - nh = b.c.NextHop - } else if b.c.SourceIP != "" { - nh = b.c.SourceIP + if !isV6 { + //nolint + nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ + Prefix: ip.String(), + PrefixLen: 32, + }) + + //nolint + nhAttr, _ := ptypes.MarshalAny(&api.NextHopAttribute{ + NextHop: "0.0.0.0", // gobgp will fill this + }) + + path = &api.Path{ + Family: &api.Family{ + Afi: api.Family_AFI_IP, + Safi: api.Family_SAFI_UNICAST, + }, + Nlri: nlri, + Pattrs: []*any.Any{originAttr, nhAttr}, + } } else { - nh = b.c.RouterID + //nolint + nlri, _ := ptypes.MarshalAny(&api.IPAddressPrefix{ + Prefix: ip.String(), + PrefixLen: 128, + }) + + v6Family := &api.Family{ + Afi: api.Family_AFI_IP6, + Safi: api.Family_SAFI_UNICAST, + } + + //nolint + mpAttr, _ := ptypes.MarshalAny(&api.MpReachNLRIAttribute{ + Family: v6Family, + NextHops: []string{"::"}, // gobgp will fill this + Nlris: []*any.Any{nlri}, + }) + + path = &api.Path{ + Family: v6Family, + Nlri: nlri, + Pattrs: []*any.Any{originAttr, mpAttr}, + } } + return +} - a2, _ := ptypes.MarshalAny(&api.NextHopAttribute{ - NextHop: nh, - }) +// ParseBGPPeerConfig - take a string and parses it into an array of peers +func ParseBGPPeerConfig(config string) (bgpPeers []Peer, err error) { + peers := strings.Split(config, ",") + if len(peers) == 0 { + return nil, fmt.Errorf("No BGP Peer configurations found") + } - return &api.Path{ - Family: &api.Family{ - Afi: api.Family_AFI_IP, - Safi: api.Family_SAFI_UNICAST, - }, - Nlri: nlri, - Pattrs: []*any.Any{a1, a2}, + for x := range peers { + peerStr := peers[x] + if peerStr == "" { + continue + } + isV6Peer := peerStr[0] == '[' + + address := "" + if isV6Peer { + addressEndPos := strings.IndexByte(peerStr, ']') + if addressEndPos == -1 { + return nil, fmt.Errorf("no matching ] found for IPv6 BGP Peer") + } + address = peerStr[1:addressEndPos] + peerStr = peerStr[addressEndPos+1:] + } + + peer := strings.Split(peerStr, ":") + if len(peer) < 2 { + return nil, fmt.Errorf("mandatory peering params : incomplete") + } + + if !isV6Peer { + address = peer[0] + } + + ASNumber, err := strconv.ParseUint(peer[1], 10, 32) + if err != nil { + return nil, fmt.Errorf("BGP Peer AS format error [%s]", peer[1]) + } + + password := "" + if len(peer) >= 3 { + password = peer[2] + } + + multiHop := false + if len(peer) >= 4 { + multiHop, err = strconv.ParseBool(peer[3]) + if err != nil { + return nil, fmt.Errorf("BGP MultiHop format error (true/false) [%s]", peer[1]) + } + } + + peerConfig := Peer{ + Address: address, + AS: uint32(ASNumber), + Password: password, + MultiHop: multiHop, + } + + bgpPeers = append(bgpPeers, peerConfig) } + return } diff --git a/pkg/bgp/server.go b/pkg/bgp/server.go index 11555c4f..8d9da11d 100644 --- a/pkg/bgp/server.go +++ b/pkg/bgp/server.go @@ -3,22 +3,22 @@ package bgp import ( "context" "fmt" - "log" "time" - api "github.com/osrg/gobgp/api" - gobgp "github.com/osrg/gobgp/pkg/server" + api 
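For reference, the peer string accepted by the new `ParseBGPPeerConfig` above takes the form `address:AS[:password[:multihop]]`, comma-separated, with IPv6 addresses wrapped in `[...]`. A minimal sketch of feeding it a configuration string, assuming the package layout in this diff (the addresses, AS number and password are made-up values):

```
package main

import (
	"fmt"

	"github.com/kube-vip/kube-vip/pkg/bgp"
)

func main() {
	// One IPv4 peer with a password and multihop enabled, and one IPv6 peer
	// (address in brackets) with only the mandatory address:AS fields.
	peers, err := bgp.ParseBGPPeerConfig("192.168.0.1:65000:s3cr3t:true,[fd00::1]:65000")
	if err != nil {
		panic(err)
	}

	for _, p := range peers {
		// Each entry comes back as a bgp.Peer{Address, AS, Password, MultiHop},
		// which AddPeer then turns into a gobgp api.Peer.
		fmt.Printf("peer %s AS%d multihop=%t\n", p.Address, p.AS, p.MultiHop)
	}
}
```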
"github.com/osrg/gobgp/v3/api" + gobgp "github.com/osrg/gobgp/v3/pkg/server" + log "github.com/sirupsen/logrus" ) // NewBGPServer takes a configuration and returns a running BGP server instance -func NewBGPServer(c *Config) (b *Server, err error) { +func NewBGPServer(c *Config, peerStateChangeCallback func(*api.WatchEventResponse_PeerEvent)) (b *Server, err error) { if c.AS == 0 { return nil, fmt.Errorf("You need to provide AS") } - // if c.SourceIP != "" && c.SourceIF != "" { - // return nil, fmt.Errorf("SourceIP and SourceIF are mutually exclusive") - // } + if c.SourceIP != "" && c.SourceIF != "" { + return nil, fmt.Errorf("SourceIP and SourceIF are mutually exclusive") + } if len(c.Peers) == 0 { return nil, fmt.Errorf("You need to provide at least one peer") @@ -32,7 +32,7 @@ func NewBGPServer(c *Config) (b *Server, err error) { if err = b.s.StartBgp(context.Background(), &api.StartBgpRequest{ Global: &api.Global{ - As: c.AS, + Asn: c.AS, RouterId: c.RouterID, ListenPort: -1, }, @@ -40,7 +40,14 @@ func NewBGPServer(c *Config) (b *Server, err error) { return } - if err = b.s.MonitorPeer(context.Background(), &api.MonitorPeerRequest{}, func(p *api.Peer) { log.Println(p) }); err != nil { + if err = b.s.WatchEvent(context.Background(), &api.WatchEventRequest{Peer: &api.WatchEventRequest_Peer{}}, func(r *api.WatchEventResponse) { + if p := r.GetPeer(); p != nil && p.Type == api.WatchEventResponse_PeerEvent_STATE { + log.Infof("[BGP] %s", p.String()) + if peerStateChangeCallback != nil { + peerStateChangeCallback(p) + } + } + }); err != nil { return } diff --git a/pkg/bgp/types.go b/pkg/bgp/types.go index 4d54924d..b11a0b41 100644 --- a/pkg/bgp/types.go +++ b/pkg/bgp/types.go @@ -1,23 +1,26 @@ package bgp -import gobgp "github.com/osrg/gobgp/pkg/server" +import gobgp "github.com/osrg/gobgp/v3/pkg/server" // Peer defines a BGP Peer type Peer struct { - Address string - AS uint32 + Address string + AS uint32 + Password string + MultiHop bool } // Config defines the BGP server configuration type Config struct { AS uint32 RouterID string - NextHop string SourceIP string SourceIF string + HoldTime uint64 + KeepaliveInterval uint64 + Peers []Peer - IPv6 bool } // Server manages a server object diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index af9c95b5..f4ca0c6f 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -1,53 +1,74 @@ package cluster import ( - "github.com/plunder-app/kube-vip/pkg/kubevip" - "github.com/plunder-app/kube-vip/pkg/vip" -) + "sync" + + log "github.com/sirupsen/logrus" -const leaderLogcount = 5 + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/vip" +) // Cluster - The Cluster object manages the state of the cluster for a particular node type Cluster struct { - stateMachine FSM - stop chan bool - completed chan bool - Network vip.Network + stop chan bool + completed chan bool + once sync.Once + Network []vip.Network } // InitCluster - Will attempt to initialise all of the required settings for the cluster func InitCluster(c *kubevip.Config, disableVIP bool) (*Cluster, error) { - - // TODO - Check for root (needed to netlink) - var network vip.Network + var networks []vip.Network var err error if !disableVIP { // Start the Virtual IP Networking configuration - network, err = startNetworking(c) + networks, err = startNetworking(c) if err != nil { return nil, err } } // Initialise the Cluster structure newCluster := &Cluster{ - Network: network, + Network: networks, } + log.Debugf("init enable service security: %t", 
c.EnableServiceSecurity) + return newCluster, nil } - -func startNetworking(c *kubevip.Config) (vip.Network, error) { +func startNetworking(c *kubevip.Config) ([]vip.Network, error) { address := c.VIP if c.Address != "" { address = c.Address } - network, err := vip.NewConfig(address, c.Interface) - if err != nil { - return nil, err + addresses := vip.GetIPs(address) + + networks := []vip.Network{} + for _, addr := range addresses { + network, err := vip.NewConfig(addr, c.Interface, c.VIPSubnet, c.DDNS, c.RoutingTableID, c.RoutingTableType, c.DNSMode) + if err != nil { + return nil, err + } + networks = append(networks, network...) } - return network, nil + + return networks, nil +} + +// Stop - Will stop the Cluster and release VIP if needed +func (cluster *Cluster) Stop() { + // Close the stop channel, which will shut down the VIP (if needed) + if cluster.stop != nil { + cluster.once.Do(func() { // Ensure that the close channel can only ever be called once + close(cluster.stop) + }) + } + + // Wait until the completed channel is closed, signallign all shutdown tasks completed + <-cluster.completed } diff --git a/pkg/cluster/clusterDDNS.go b/pkg/cluster/clusterDDNS.go new file mode 100644 index 00000000..0cf21be5 --- /dev/null +++ b/pkg/cluster/clusterDDNS.go @@ -0,0 +1,28 @@ +package cluster + +import ( + "context" + + "github.com/kube-vip/kube-vip/pkg/vip" +) + +// StartDDNS should start go routine for dhclient to hold the lease for the IP +// StartDDNS should wait until IP is allocated from DHCP, set it to cluster.Network +// so the OnStartedLeading can continue to configure the VIP initially +// during runtime if IP changes, startDDNS don't have to do reconfigure because +// dnsUpdater already have the functionality to keep trying resolve the IP +// and update the VIP configuration if it changes +func (cluster *Cluster) StartDDNS(ctx context.Context) error { + for i := range cluster.Network { + ddnsMgr := vip.NewDDNSManager(ctx, cluster.Network[i]) + ip, err := ddnsMgr.Start() + if err != nil { + return err + } + if err = cluster.Network[i].SetIP(ip); err != nil { + return err + } + } + + return nil +} diff --git a/pkg/cluster/clusterLeader.go b/pkg/cluster/clusterLeader.go deleted file mode 100644 index 9da41f7f..00000000 --- a/pkg/cluster/clusterLeader.go +++ /dev/null @@ -1,329 +0,0 @@ -package cluster - -import ( - "context" - "fmt" - "os" - "os/signal" - "path/filepath" - "syscall" - "time" - - "github.com/plunder-app/kube-vip/pkg/bgp" - "github.com/plunder-app/kube-vip/pkg/kubevip" - leaderelection "github.com/plunder-app/kube-vip/pkg/leaderElection" - "github.com/plunder-app/kube-vip/pkg/loadbalancer" - "github.com/plunder-app/kube-vip/pkg/packet" - - "github.com/plunder-app/kube-vip/pkg/vip" - - "github.com/packethost/packngo" - - log "github.com/sirupsen/logrus" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/leaderelection/resourcelock" -) - -const plunderLock = "plunder-lock" -const namespace = "kube-system" - -// Manager degines the manager of the load-balancing services -type Manager struct { - clientSet *kubernetes.Clientset -} - -// NewManager will create a new managing object -func NewManager(path string, inCluster bool) (*Manager, error) { - var clientset *kubernetes.Clientset - if inCluster { - // This will attempt to load the configuration when running within a POD - cfg, err := rest.InClusterConfig() - if err != nil { - return nil, 
fmt.Errorf("error creating kubernetes client config: %s", err.Error()) - } - clientset, err = kubernetes.NewForConfig(cfg) - - if err != nil { - return nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) - } - // use the current context in kubeconfig - } else { - if path == "" { - path = filepath.Join(os.Getenv("HOME"), ".kube", "config") - } - config, err := clientcmd.BuildConfigFromFlags("", path) - if err != nil { - panic(err.Error()) - } - - // We modify the config so that we can always speak to the correct host - id, err := os.Hostname() - if err != nil { - return nil, err - } - - // TODO - we need to make the host/port configurable - - config.Host = fmt.Sprintf("%s:6443", id) - clientset, err = kubernetes.NewForConfig(config) - - if err != nil { - return nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) - } - } - - return &Manager{ - clientSet: clientset, - }, nil -} - -// StartLeaderCluster - Begins a running instance of the Raft cluster -func (cluster *Cluster) StartLeaderCluster(c *kubevip.Config, sm *Manager) error { - - id, err := os.Hostname() - if err != nil { - return err - } - - log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", namespace, plunderLock, id) - - // we use the Lease lock type since edits to Leases are less common - // and fewer objects in the cluster watch "all Leases". - lock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Name: plunderLock, - Namespace: namespace, - }, - Client: sm.clientSet.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - }, - } - - // use a Go context so we can tell the leaderelection code when we - // want to step down - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // use a Go context so we can tell the arp loop code when we - // want to step down - ctxArp, cancelArp := context.WithCancel(context.Background()) - defer cancelArp() - - // listen for interrupts or the Linux SIGTERM signal and cancel - // our context, which the leader election code will observe and - // step down - signalChan := make(chan os.Signal, 1) - // Add Notification for Userland interrupt - signal.Notify(signalChan, syscall.SIGINT) - - // Add Notification for SIGTERM (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGTERM) - - // Add Notification for SIGKILL (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGKILL) - - go func() { - <-signalChan - log.Info("Received termination, signaling shutdown") - // Cancel the context, which will in turn cancel the leadership - cancel() - // Cancel the arp context, which will in turn stop any broadcasts - }() - - // (attempt to) Remove the virtual IP, incase it already exists - cluster.Network.DeleteIP() - - // Managers for Vip load balancers and none-vip loadbalancers - nonVipLB := loadbalancer.LBManager{} - VipLB := loadbalancer.LBManager{} - - // BGP server - var bgpServer *bgp.Server - // Defer a function to check if the bgpServer has been created and if so attempt to close it - defer func() { - if bgpServer != nil { - bgpServer.Close() - } - }() - - // If Packet is enabled then we can begin our preperation work - var packetClient *packngo.Client - if c.EnablePacket { - packetClient, err = packngo.NewClient() - if err != nil { - log.Error(err) - } - - // We're using Packet with BGP, popuplate the Peer information from the API - if c.EnableBGP { - log.Infoln("Looking up the BGP configuration from packet") - err = packet.BGPLookup(packetClient, c) - if err != nil 
{ - log.Error(err) - } - } - } - - if c.EnableBGP { - // Lets start BGP - log.Info("Starting the BGP server to adverise VIP routes to VGP peers") - bgpServer, err = bgp.NewBGPServer(&c.BGPConfig) - if err != nil { - log.Error(err) - } - } - - if c.EnableLoadBalancer { - - // Iterate through all Configurations - if len(c.LoadBalancers) != 0 { - for x := range c.LoadBalancers { - // If the load balancer doesn't bind to the VIP - if c.LoadBalancers[x].BindToVip == false { - err = nonVipLB.Add("", &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - } - } - } - } - } - // start the leader election code loop - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - // IMPORTANT: you MUST ensure that any code you have that - // is protected by the lease must terminate **before** - // you call cancel. Otherwise, you could have a background - // loop still running and another process could - // get elected before your background loop finished, violating - // the stated goal of the lease. - ReleaseOnCancel: true, - LeaseDuration: time.Duration(c.LeaseDuration) * time.Second, - RenewDeadline: time.Duration(c.RenewDeadline) * time.Second, - RetryPeriod: time.Duration(c.RetryPeriod) * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(ctx context.Context) { - - // we're notified when we start - log.Info("This node is starting with leadership of the cluster") - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) - } - - if c.EnablePacket { - // We're not using Packet with BGP - if !c.EnableBGP { - // Attempt to attach the EIP in the standard manner - log.Debugf("Attaching the Packet EIP through the API to this host") - err = packet.AttachEIP(packetClient, c, id) - if err != nil { - log.Error(err) - } - } - } - - if c.EnableBGP { - // Lets advertise the VIP over BGP, the host needs to be passed using CIDR notation - cidrVip := fmt.Sprintf("%s/%s", cluster.Network.IP(), c.VIPCIDR) - log.Debugf("Attempting to advertise the address [%s] over BGP", cidrVip) - - err = bgpServer.AddHost(cidrVip) - if err != nil { - log.Error(err) - } - } - - if c.EnableLoadBalancer { - // Once we have the VIP running, start the load balancer(s) that bind to the VIP - for x := range c.LoadBalancers { - - if c.LoadBalancers[x].BindToVip == true { - err = VipLB.Add(cluster.Network.IP(), &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - - // Stop all load balancers associated with the VIP - err = VipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - } - } - } - - if c.GratuitousARP == true { - ctxArp, cancelArp = context.WithCancel(context.Background()) - - go func(ctx context.Context) { - for { - select { - case <-ctx.Done(): // if cancel() execute - return - default: - // Gratuitous ARP, will broadcast to new MAC <-> IP - err = vip.ARPSendGratuitous(cluster.Network.IP(), c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } - time.Sleep(3 * time.Second) - } - }(ctxArp) - } - }, - OnStoppedLeading: func() { - // we can do cleanup here - log.Info("This node is becoming a follower within the cluster") - - // Stop the Arp context if it is running - cancelArp() - - // Stop the BGP server - if bgpServer != nil { - err = 
bgpServer.Close() - if err != nil { - log.Warnf("%v", err) - } - } - - // Stop all load balancers associated with the VIP - err = VipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - }, - OnNewLeader: func(identity string) { - // we're notified when new leader elected - log.Infof("Node [%s] is assuming leadership of the cluster", identity) - - if identity == id { - // We have the lock - } - }, - }, - }) - - //<-signalChan - log.Infof("Shutting down Kube-Vip Leader Election cluster") - - // Force a removal of the VIP (ignore the error if we don't have it) - cluster.Network.DeleteIP() - - return nil -} diff --git a/pkg/cluster/clusterLeaderElection.go b/pkg/cluster/clusterLeaderElection.go new file mode 100644 index 00000000..1eebb02c --- /dev/null +++ b/pkg/cluster/clusterLeaderElection.go @@ -0,0 +1,375 @@ +package cluster + +import ( + "context" + "fmt" + "os" + "os/signal" + "path/filepath" + "syscall" + "time" + + "github.com/davecgh/go-spew/spew" + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + "github.com/kube-vip/kube-vip/pkg/etcd" + "github.com/kube-vip/kube-vip/pkg/k8s" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/loadbalancer" + + "github.com/packethost/packngo" + + log "github.com/sirupsen/logrus" + clientv3 "go.etcd.io/etcd/client/v3" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + watchtools "k8s.io/client-go/tools/watch" +) + +// Manager degines the manager of the load-balancing services +type Manager struct { + KubernetesClient *kubernetes.Clientset + // This channel is used to signal a shutdown + SignalChan chan os.Signal + + EtcdClient *clientv3.Client +} + +// NewManager will create a new managing object +func NewManager(path string, inCluster bool, port int) (*Manager, error) { + var hostname string + + // If inCluster is set then it will likely have started as a static pod or won't have the + // VIP up before trying to connect to the API server, we set the API endpoint to this machine to + // ensure connectivity. Else if the path passed is empty and not running in the cluster, + // attempt to look for a kubeconfig in the default HOME dir. 
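To see how the new `Manager` is intended to be wired together with `InitCluster` and `StartCluster`, here is a rough sketch built only from the signatures introduced in this diff; the configuration values, lease name and port are illustrative, and error handling is kept minimal:

```
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/kube-vip/kube-vip/pkg/cluster"
	"github.com/kube-vip/kube-vip/pkg/kubevip"
)

func main() {
	// Field names follow this diff; the values themselves are placeholders.
	cfg := &kubevip.Config{
		Interface:     "ens192",
		VIP:           "192.168.0.75",
		Namespace:     "kube-system",
		LeaseName:     "plndr-cp-lock",
		LeaseDuration: 5,
		RenewDeadline: 3,
		RetryPeriod:   1,
	}

	// Prepare the VIP networking state for this node; the address itself is
	// only added later by the leader-election callbacks.
	c, err := cluster.InitCluster(cfg, false)
	if err != nil {
		log.Fatal(err)
	}

	// In-cluster manager; NewManager points the client at "kubernetes:6443".
	sm, err := cluster.NewManager("", true, 6443)
	if err != nil {
		log.Fatal(err)
	}

	// Blocks in the leader-election loop; the callbacks wired up inside
	// StartCluster add or remove the VIP as leadership changes hands.
	if err := c.StartCluster(cfg, sm, nil); err != nil {
		log.Fatal(err)
	}
}
```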
+ + hostname = fmt.Sprintf("kubernetes:%v", port) + + if len(path) == 0 && !inCluster { + path = filepath.Join(os.Getenv("HOME"), ".kube", "config") + + // We modify the config so that we can always speak to the correct host + id, err := os.Hostname() + if err != nil { + return nil, err + } + + hostname = fmt.Sprintf("%s:%v", id, port) + } + + clientset, err := k8s.NewClientset(path, inCluster, hostname) + if err != nil { + return nil, fmt.Errorf("error creating a new k8s clientset: %v", err) + } + + return &Manager{ + KubernetesClient: clientset, + }, nil +} + +// StartCluster - Begins a running instance of the Leader Election cluster +func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer *bgp.Server) error { + id, err := os.Hostname() + if err != nil { + return err + } + + log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", c.Namespace, c.LeaseName, id) + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // use a Go context so we can tell the arp loop code when we + // want to step down + ctxArp, cancelArp := context.WithCancel(context.Background()) + defer cancelArp() + + // use a Go context so we can tell the dns loop code when we + // want to step down + ctxDNS, cancelDNS := context.WithCancel(context.Background()) + defer cancelDNS() + + // listen for interrupts or the Linux SIGTERM signal and cancel + // our context, which the leader election code will observe and + // step down + signalChan := make(chan os.Signal, 1) + // Add Notification for Userland interrupt + signal.Notify(signalChan, syscall.SIGINT) + + // Add Notification for SIGTERM (sent from Kubernetes) + signal.Notify(signalChan, syscall.SIGTERM) + + go func() { + <-signalChan + log.Info("Received termination, signaling cluster shutdown") + // Cancel the context, which will in turn cancel the leadership + cancel() + // Cancel the arp context, which will in turn stop any broadcasts + }() + + // (attempt to) Remove the virtual IP, in case it already exists + + for i := range cluster.Network { + err = cluster.Network[i].DeleteIP() + if err != nil { + log.Errorf("could not delete virtualIP: %v", err) + } + } + + // Defer a function to check if the bgpServer has been created and if so attempt to close it + defer func() { + if bgpServer != nil { + bgpServer.Close() + } + }() + + // If Equinix Metal is enabled then we can begin our preparation work + var packetClient *packngo.Client + if c.EnableMetal { + if c.ProviderConfig != "" { + key, project, err := equinixmetal.GetPacketConfig(c.ProviderConfig) + if err != nil { + log.Error(err) + } else { + // Set the environment variable with the key for the project + os.Setenv("PACKET_AUTH_TOKEN", key) + // Update the configuration with the project key + c.MetalProjectID = project + } + } + packetClient, err = packngo.NewClient() + if err != nil { + log.Error(err) + } + + // We're using Equinix Metal with BGP, populate the Peer information from the API + if c.EnableBGP { + log.Infoln("Looking up the BGP configuration from Equinix Metal") + err = equinixmetal.BGPLookup(packetClient, c) + if err != nil { + log.Error(err) + } + } + } + + if c.EnableBGP && bgpServer == nil { + // Lets start BGP + log.Info("Starting the BGP server to advertise VIP routes to VGP peers") + bgpServer, err = bgp.NewBGPServer(&c.BGPConfig, nil) + if err != nil { + log.Error(err) + } + } + + run := &runConfig{ + config: c, + leaseID: id, + 
sm: sm, + onStartedLeading: func(ctx context.Context) { + // As we're leading lets start the vip service + err := cluster.vipService(ctxArp, ctxDNS, c, sm, bgpServer, packetClient) + if err != nil { + log.Errorf("Error starting the VIP service on the leader [%s]", err) + } + }, + onStoppedLeading: func() { + // we can do cleanup here + log.Info("This node is becoming a follower within the cluster") + + // Stop the dns context + cancelDNS() + // Stop the Arp context if it is running + cancelArp() + + // Stop the BGP server + if bgpServer != nil { + err := bgpServer.Close() + if err != nil { + log.Warnf("%v", err) + } + } + + for i := range cluster.Network { + err := cluster.Network[i].DeleteIP() + if err != nil { + log.Warnf("%v", err) + } + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + onNewLeader: func(identity string) { + // we're notified when new leader elected + log.Infof("Node [%s] is assuming leadership of the cluster", identity) + }, + } + + switch c.LeaderElectionType { + case "kubernetes", "": + cluster.runKubernetesLeaderElectionOrDie(ctx, run) + case "etcd": + cluster.runEtcdLeaderElectionOrDie(ctx, run) + default: + log.Info(fmt.Sprintf("LeaderElectionMode %s not supported, exiting", c.LeaderElectionType)) + } + + return nil +} + +type runConfig struct { + config *kubevip.Config + leaseID string + sm *Manager + + // onStartedLeading is called when this member starts leading. + onStartedLeading func(context.Context) + // onStoppedLeading is called when this member stops leading. + onStoppedLeading func() + // onNewLeader is called when the client observes a leader that is + // not the previously observed leader. This includes the first observed + // leader when the client starts. + onNewLeader func(identity string) +} + +func (cluster *Cluster) runKubernetesLeaderElectionOrDie(ctx context.Context, run *runConfig) { + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: run.config.LeaseName, + Namespace: run.config.Namespace, + Annotations: run.config.LeaseAnnotations, + }, + Client: run.sm.KubernetesClient.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: run.leaseID, + }, + } + + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. 
+ ReleaseOnCancel: true, + LeaseDuration: time.Duration(run.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(run.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(run.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: run.onStartedLeading, + OnStoppedLeading: run.onStoppedLeading, + OnNewLeader: run.onNewLeader, + }, + }) +} + +func (cluster *Cluster) runEtcdLeaderElectionOrDie(ctx context.Context, run *runConfig) { + etcd.RunElectionOrDie(ctx, &etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{Client: run.sm.EtcdClient}, + Name: run.config.LeaseName, + MemberID: run.leaseID, + LeaseDurationSeconds: int64(run.config.LeaseDuration), + Callbacks: etcd.LeaderCallbacks{ + OnStartedLeading: run.onStartedLeading, + OnStoppedLeading: run.onStoppedLeading, + OnNewLeader: run.onNewLeader, + }, + }) +} + +func (sm *Manager) NodeWatcher(lb *loadbalancer.IPVSLoadBalancer, port int) error { + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + log.Infof("Kube-Vip is watching nodes for control-plane labels") + + listOptions := metav1.ListOptions{ + LabelSelector: "node-role.kubernetes.io/control-plane", + } + + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.KubernetesClient.CoreV1().Nodes().Watch(context.Background(), listOptions) + }, + }) + if err != nil { + return fmt.Errorf("error creating label watcher: %s", err.Error()) + } + + go func() { + <-sm.SignalChan + log.Info("Received termination, signaling shutdown") + // Cancel the context + rw.Stop() + }() + + ch := rw.ResultChan() + // defer rw.Stop() + + for event := range ch { + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + node, ok := event.Object.(*v1.Node) + if !ok { + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") + } + // Find the node IP address (this isn't foolproof) + for x := range node.Status.Addresses { + if node.Status.Addresses[x].Type == v1.NodeInternalIP { + err = lb.AddBackend(node.Status.Addresses[x].Address, port) + if err != nil { + log.Errorf("add IPVS backend [%v]", err) + } + } + } + case watch.Deleted: + node, ok := event.Object.(*v1.Node) + if !ok { + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") + } + + // Find the node IP address (this isn't foolproof) + for x := range node.Status.Addresses { + if node.Status.Addresses[x].Type == v1.NodeInternalIP { + err = lb.RemoveBackend(node.Status.Addresses[x].Address, port) + if err != nil { + log.Errorf("Del IPVS backend [%v]", err) + } + } + } + + log.Infof("Node [%s] has been deleted", node.Name) + + case watch.Bookmark: + // Un-used + case watch.Error: + log.Error("Error attempting to watch Kubernetes Nodes") + + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + } + + status := statusErr.ErrStatus + log.Errorf("%v", status) + default: + } + } + + log.Infoln("Exiting Node watcher") + return nil +} diff --git a/pkg/cluster/clusterRaft.go b/pkg/cluster/clusterRaft.go deleted file mode 100644 index 0f0f1554..00000000 --- a/pkg/cluster/clusterRaft.go +++ /dev/null @@ -1,296 +0,0 @@ -package cluster 
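The `NodeWatcher` above keeps an IPVS service in sync with the control plane: every node carrying the `node-role.kubernetes.io/control-plane` label has its `InternalIP` added as a backend, and deleted nodes are removed again. A small sketch of that wiring, assuming the signatures in this diff (the VIP, the ports and the "local" forwarding-method value are illustrative guesses, not confirmed defaults):

```
package main

import (
	log "github.com/sirupsen/logrus"

	"github.com/kube-vip/kube-vip/pkg/cluster"
	"github.com/kube-vip/kube-vip/pkg/loadbalancer"
)

func main() {
	// Out-of-cluster manager using ~/.kube/config, talking to the API on 6443.
	sm, err := cluster.NewManager("", false, 6443)
	if err != nil {
		log.Fatal(err)
	}

	// IPVS service listening on the VIP; the forwarding method here is a
	// plausible value only, not taken from this diff.
	lb, err := loadbalancer.NewIPVSLB("192.168.0.75", 6443, "local")
	if err != nil {
		log.Fatal(err)
	}

	// Mirror the control-plane nodes' InternalIPs as IPVS backends on the
	// API server port (6444 in the documentation removed earlier in this diff).
	if err := sm.NodeWatcher(lb, 6444); err != nil {
		log.Fatalf("node watcher exited: %v", err)
	}
}
```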
- -import ( - "fmt" - "net" - "time" - - "github.com/hashicorp/raft" - "github.com/plunder-app/kube-vip/pkg/kubevip" - "github.com/plunder-app/kube-vip/pkg/loadbalancer" - "github.com/plunder-app/kube-vip/pkg/vip" - log "github.com/sirupsen/logrus" -) - -// StartRaftCluster - Begins a running instance of the Raft cluster -func (cluster *Cluster) StartRaftCluster(c *kubevip.Config) error { - - // Create local configuration address - localAddress := fmt.Sprintf("%s:%d", c.LocalPeer.Address, c.LocalPeer.Port) - - // Begin the Raft configuration - config := raft.DefaultConfig() - config.LocalID = raft.ServerID(c.LocalPeer.ID) - logger := log.StandardLogger().Writer() - config.LogOutput = logger - - // Initialize communication - address, err := net.ResolveTCPAddr("tcp", localAddress) - if err != nil { - return err - } - - // Create transport - transport, err := raft.NewTCPTransport(localAddress, address, 3, 10*time.Second, logger) - if err != nil { - return err - } - - // Create Raft structures - snapshots := raft.NewInmemSnapshotStore() - logStore := raft.NewInmemStore() - stableStore := raft.NewInmemStore() - - // Cluster configuration - configuration := raft.Configuration{} - - // Add Local Peer - configuration.Servers = append(configuration.Servers, raft.Server{ - ID: raft.ServerID(c.LocalPeer.ID), - Address: raft.ServerAddress(fmt.Sprintf("%s:%d", c.LocalPeer.Address, c.LocalPeer.Port))}) - - // If we want to start a node as leader then we will not add any remote peers, this will leave this as a cluster of one - // The remotePeers will add themselves to the cluster as they're added - if c.StartAsLeader != true { - for x := range c.RemotePeers { - // Make sure that we don't add in this server twice - if c.LocalPeer.Address != c.RemotePeers[x].Address { - - // Build the address from the peer configuration - peerAddress := fmt.Sprintf("%s:%d", c.RemotePeers[x].Address, c.RemotePeers[x].Port) - - // Set this peer into the raft configuration - configuration.Servers = append(configuration.Servers, raft.Server{ - ID: raft.ServerID(c.RemotePeers[x].ID), - Address: raft.ServerAddress(peerAddress)}) - } - } - log.Info("This node will attempt to start as Follower") - } else { - log.Info("This node will attempt to start as Leader") - } - - // Bootstrap cluster - if err := raft.BootstrapCluster(config, logStore, stableStore, snapshots, transport, configuration); err != nil { - return err - } - - // Create RAFT instance - raftServer, err := raft.NewRaft(config, cluster.stateMachine, logStore, stableStore, snapshots, transport) - if err != nil { - return err - } - - cluster.stop = make(chan bool, 1) - cluster.completed = make(chan bool, 1) - ticker := time.NewTicker(time.Second) - isLeader := c.StartAsLeader - - // (attempt to) Remove the virtual IP, incase it already exists - cluster.Network.DeleteIP() - - // leader log broadcast - this counter is used to stop flooding STDOUT with leader log entries - var leaderbroadcast int - // Managers for Vip load balancers and none-vip loadbalancers - nonVipLB := loadbalancer.LBManager{} - VipLB := loadbalancer.LBManager{} - - // Iterate through all Configurations - for x := range c.LoadBalancers { - // If the load balancer doesn't bind to the VIP - if c.LoadBalancers[x].BindToVip == false { - err = nonVipLB.Add("", &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - } - } - } - - // On a cold start the node will sleep for 5 seconds to ensure that 
leader elections are complete - log.Infoln("This instance will wait approximately 5 seconds, from cold start to ensure cluster elections are complete") - time.Sleep(time.Second * 5) - - go func() { - for { - if c.AddPeersAsBackends == true { - // Get addresses and change backends - - // c.LoadBalancers[0].Backends - // for x := range raftServer.GetConfiguration().Configuration().Servers { - // raftServer.GetConfiguration().Configuration().Servers[x].Address - // } - - } - // Broadcast the current leader on this node if it's the correct time (every leaderLogcount * time.Second) - if leaderbroadcast == leaderLogcount { - log.Infof("The Node [%s] is leading", raftServer.Leader()) - // Reset the timer - leaderbroadcast = 0 - - // ensure that if this node is the leader, it is set as the leader - if localAddress == string(raftServer.Leader()) { - // Re-broadcast arp to ensure network stays up to date - if c.GratuitousARP == true { - // Gratuitous ARP, will broadcast to new MAC <-> IP - err = vip.ARPSendGratuitous(cluster.Network.IP(), c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } - if !isLeader { - log.Infoln("This node is leading, but isnt the leader (correcting)") - isLeader = true - } - } else { - // (attempt to) Remove the virtual IP, incase it already exists to keep nodes clean - cluster.Network.DeleteIP() - isLeader = false - } - - } - leaderbroadcast++ - - select { - case leader := <-raftServer.LeaderCh(): - log.Infoln("New Election event") - if leader { - isLeader = true - - log.Info("This node is assuming leadership of the cluster") - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) - } - - // Once we have the VIP running, start the load balancer(s) that bind to the VIP - - for x := range c.LoadBalancers { - - if c.LoadBalancers[x].BindToVip == true { - err = VipLB.Add(cluster.Network.IP(), &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - log.Errorf("Dropping Leadership to another node in the cluster") - raftServer.LeadershipTransfer() - - // Stop all load balancers associated with the VIP - err = VipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - } - } - - if c.GratuitousARP == true { - // Gratuitous ARP, will broadcast to new MAC <-> IP - err = vip.ARPSendGratuitous(cluster.Network.IP(), c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } - } else { - isLeader = false - - log.Info("This node is becoming a follower within the cluster") - - // Stop all load balancers associated with the VIP - err = VipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - - case <-ticker.C: - - if isLeader { - - result, err := cluster.Network.IsSet() - if err != nil { - log.WithFields(log.Fields{"error": err, "ip": cluster.Network.IP(), "interface": cluster.Network.Interface()}).Error("Could not check ip") - } - - if result == false { - log.Error("This node is leader and is adopting the virtual IP") - - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) - } - // Once we have the VIP running, start the load balancer(s) that bind to the VIP - - for x := range c.LoadBalancers { - - if c.LoadBalancers[x].BindToVip == true { - err = VipLB.Add(cluster.Network.IP(), &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating 
loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - } - } - } - if c.GratuitousARP == true { - // Gratuitous ARP, will broadcast to new MAC <-> IP - err = vip.ARPSendGratuitous(cluster.Network.IP(), c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } - } - } - - case <-cluster.stop: - log.Info("[RAFT] Stopping this node") - log.Info("[LOADBALANCER] Stopping load balancers") - - // Stop all load balancers associated with the VIP - err = VipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - // Stop all load balancers associated with the Host - err = nonVipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - if isLeader { - log.Info("[VIP] Releasing the Virtual IP") - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - - close(cluster.completed) - - return - } - } - }() - - log.Info("Started") - - return nil -} - -// Stop - Will stop the Cluster and release VIP if needed -func (cluster *Cluster) Stop() { - // Close the stop chanel, which will shut down the VIP (if needed) - close(cluster.stop) - - // Wait until the completed channel is closed, signallign all shutdown tasks completed - <-cluster.completed - - log.Info("Stopped") -} diff --git a/pkg/cluster/service.go b/pkg/cluster/service.go new file mode 100644 index 00000000..445ee779 --- /dev/null +++ b/pkg/cluster/service.go @@ -0,0 +1,295 @@ +package cluster + +import ( + "context" + "fmt" + "os" + "os/signal" + "syscall" + "time" + + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/loadbalancer" + "github.com/kube-vip/kube-vip/pkg/vip" + "github.com/packethost/packngo" + log "github.com/sirupsen/logrus" +) + +func (cluster *Cluster) vipService(ctxArp, ctxDNS context.Context, c *kubevip.Config, sm *Manager, bgpServer *bgp.Server, packetClient *packngo.Client) error { + id, err := os.Hostname() + if err != nil { + return err + } + + // listen for interrupts or the Linux SIGTERM signal and cancel + // our context, which the leader election code will observe and + // step down + signalChan := make(chan os.Signal, 1) + // Add Notification for Userland interrupt + signal.Notify(signalChan, syscall.SIGINT) + + // Add Notification for SIGTERM (sent from Kubernetes) + signal.Notify(signalChan, syscall.SIGTERM) + + for i := range cluster.Network { + + if cluster.Network[i].IsDDNS() { + if err := cluster.StartDDNS(ctxDNS); err != nil { + log.Error(err) + } + } + + // start the dns updater if address is dns + if cluster.Network[i].IsDNS() { + log.Infof("starting the DNS updater for the address %s", cluster.Network[i].DNSName()) + ipUpdater := vip.NewIPUpdater(cluster.Network[i]) + ipUpdater.Run(ctxDNS) + } + + err = cluster.Network[i].AddIP() + if err != nil { + log.Fatalf("%v", err) + } + + if c.EnableMetal { + // We're not using Equinix Metal with BGP + if !c.EnableBGP { + // Attempt to attach the EIP in the standard manner + log.Debugf("Attaching the Equinix Metal EIP through the API to this host") + err = equinixmetal.AttachEIP(packetClient, c, id) + if err != nil { + log.Error(err) + } + } + } + + if c.EnableBGP { + // Lets advertise the VIP over BGP, the host needs to be passed using CIDR notation + cidrVip := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), c.VIPCIDR) + log.Debugf("Attempting to advertise the address [%s] over BGP", cidrVip) + + err = bgpServer.AddHost(cidrVip) + if err != nil { + log.Error(err) + } + 
} + + if c.EnableLoadBalancer { + + log.Infof("Starting IPVS LoadBalancer") + + lb, err := loadbalancer.NewIPVSLB(cluster.Network[i].IP(), c.LoadBalancerPort, c.LoadBalancerForwardingMethod) + if err != nil { + log.Errorf("Error creating IPVS LoadBalancer [%s]", err) + } + + go func() { + err = sm.NodeWatcher(lb, c.Port) + if err != nil { + log.Errorf("Error watching node labels [%s]", err) + } + }() + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-signalChan + err = lb.RemoveIPVSLB() + if err != nil { + log.Errorf("Error stopping IPVS LoadBalancer [%s]", err) + } + log.Info("Stopping IPVS LoadBalancer") + }() + } + + if c.EnableARP { + // ctxArp, cancelArp = context.WithCancel(context.Background()) + + go func(ctx context.Context) { + ipString := cluster.Network[i].IP() + isIPv6 := vip.IsIPv6(ipString) + + var ndp *vip.NdpResponder + if isIPv6 { + ndp, err = vip.NewNDPResponder(c.Interface) + if err != nil { + log.Fatalf("failed to create new NDP Responder") + } + } + + if ndp != nil { + defer ndp.Close() + } + log.Infof("Gratuitous Arp broadcast will repeat every 3 seconds for [%s]", ipString) + for { + select { + case <-ctx.Done(): // if cancel() execute + return + default: + cluster.ensureIPAndSendGratuitous(c.Interface, ndp) + } + time.Sleep(3 * time.Second) + } + }(ctxArp) + } + + if c.EnableRoutingTable { + err = cluster.Network[i].AddRoute() + if err != nil { + log.Warnf("%v", err) + } + } + } + + return nil +} + +// StartLoadBalancerService will start a VIP instance and leave it for kube-proxy to handle +func (cluster *Cluster) StartLoadBalancerService(c *kubevip.Config, bgp *bgp.Server) { + // use a Go context so we can tell the arp loop code when we + // want to step down + //nolint + ctxArp, cancelArp := context.WithCancel(context.Background()) + + cluster.stop = make(chan bool, 1) + cluster.completed = make(chan bool, 1) + + for i := range cluster.Network { + network := cluster.Network[i] + + err := network.DeleteIP() + if err != nil { + log.Warnf("Attempted to clean existing VIP => %v", err) + } + if c.EnableRoutingTable && (c.EnableLeaderElection || c.EnableServicesElection) { + err = network.AddRoute() + if err != nil { + log.Warnf("%v", err) + } + } else if !c.EnableRoutingTable { + err = network.AddIP() + if err != nil { + log.Warnf("%v", err) + } + } + + if c.EnableARP { + // ctxArp, cancelArp = context.WithCancel(context.Background()) + + ipString := network.IP() + + var ndp *vip.NdpResponder + if vip.IsIPv6(ipString) { + ndp, err = vip.NewNDPResponder(c.Interface) + if err != nil { + log.Fatalf("failed to create new NDP Responder") + } + } + go func(ctx context.Context) { + if ndp != nil { + defer ndp.Close() + } + log.Debugf("(svcs) broadcasting ARP update for %s via %s, every %dms", ipString, c.Interface, c.ArpBroadcastRate) + + for { + select { + case <-ctx.Done(): // if cancel() execute + log.Debugf("(svcs) ending ARP update for %s via %s, every %dms", ipString, c.Interface, c.ArpBroadcastRate) + return + default: + cluster.ensureIPAndSendGratuitous(c.Interface, ndp) + } + if c.ArpBroadcastRate < 500 { + log.Errorf("arp broadcast rate is [%d], this shouldn't be lower that 300ms (defaulting to 3000)", c.ArpBroadcastRate) + c.ArpBroadcastRate = 3000 + } + time.Sleep(time.Duration(c.ArpBroadcastRate) * time.Millisecond) + } + }(ctxArp) + } + + if c.EnableBGP && (c.EnableLeaderElection || c.EnableServicesElection) { + // Lets advertise the VIP over BGP, the host needs to be passed using CIDR notation + cidrVip := 
fmt.Sprintf("%s/%s", network.IP(), c.VIPCIDR) + log.Debugf("(svcs) attempting to advertise the address [%s] over BGP", cidrVip) + err = bgp.AddHost(cidrVip) + if err != nil { + log.Error(err) + } + } + } + + go func() { + <-cluster.stop + // Stop the Arp context if it is running + cancelArp() + + if c.EnableRoutingTable && (c.EnableLeaderElection || c.EnableServicesElection) { + for i := range cluster.Network { + if err := cluster.Network[i].DeleteRoute(); err != nil { + log.Warnf("%v", err) + } + } + + close(cluster.completed) + return + } + + log.Info("[LOADBALANCER] Stopping load balancers") + + for i := range cluster.Network { + log.Infof("[VIP] Releasing the Virtual IP [%s]", cluster.Network[i].IP()) + if err := cluster.Network[i].DeleteIP(); err != nil { + log.Warnf("%v", err) + } + } + + close(cluster.completed) + }() +} + +// ensureIPAndSendGratuitous - adds IP to the interface if missing, and send +// either a gratuitous ARP or gratuitous NDP. Re-adds the interface if it is IPv6 +// and in a dadfailed state. +func (cluster *Cluster) ensureIPAndSendGratuitous(iface string, ndp *vip.NdpResponder) { + for i := range cluster.Network { + ipString := cluster.Network[i].IP() + isIPv6 := vip.IsIPv6(ipString) + // Check if IP is dadfailed + if cluster.Network[i].IsDADFAIL() { + log.Warnf("IP address is in dadfailed state, removing [%s] from interface [%s]", ipString, iface) + err := cluster.Network[i].DeleteIP() + if err != nil { + log.Warnf("%v", err) + } + } + + // Ensure the address exists on the interface before attempting to ARP + set, err := cluster.Network[i].IsSet() + if err != nil { + log.Warnf("%v", err) + } + if !set { + log.Warnf("Re-applying the VIP configuration [%s] to the interface [%s]", ipString, iface) + err = cluster.Network[i].AddIP() + if err != nil { + log.Warnf("%v", err) + } + } + + if isIPv6 { + // Gratuitous NDP, will broadcast new MAC <-> IPv6 address + err := ndp.SendGratuitous(ipString) + if err != nil { + log.Warnf("%v", err) + } + } else { + // Gratuitous ARP, will broadcast to new MAC <-> IPv4 address + err := vip.ARPSendGratuitous(ipString, iface) + if err != nil { + log.Warnf("%v", err) + } + } + } + +} diff --git a/pkg/cluster/singleNode.go b/pkg/cluster/singleNode.go index e24328e4..da983a8f 100644 --- a/pkg/cluster/singleNode.go +++ b/pkg/cluster/singleNode.go @@ -1,18 +1,21 @@ package cluster import ( + "context" + + "github.com/packethost/packngo" log "github.com/sirupsen/logrus" - "github.com/plunder-app/kube-vip/pkg/kubevip" - "github.com/plunder-app/kube-vip/pkg/loadbalancer" - "github.com/plunder-app/kube-vip/pkg/vip" + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/vip" ) // StartSingleNode will start a single node cluster func (cluster *Cluster) StartSingleNode(c *kubevip.Config, disableVIP bool) error { // Start kube-vip as a single node server - // TODO - Split all this code out as a seperate function + // TODO - Split all this code out as a separate function log.Infoln("Starting kube-vip as a single node cluster") log.Info("This node is assuming leadership of the cluster") @@ -20,84 +23,57 @@ func (cluster *Cluster) StartSingleNode(c *kubevip.Config, disableVIP bool) erro cluster.stop = make(chan bool, 1) cluster.completed = make(chan bool, 1) - // Managers for Vip load balancers and none-vip loadbalancers - nonVipLB := loadbalancer.LBManager{} - VipLB := loadbalancer.LBManager{} - - // Iterate through all Configurations - for x := range c.LoadBalancers { - // If the 
load balancer doesn't bind to the VIP - if c.LoadBalancers[x].BindToVip == false { - err := nonVipLB.Add("", &c.LoadBalancers[x]) + for i := range cluster.Network { + if !disableVIP { + err := cluster.Network[i].DeleteIP() if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) + log.Warnf("Attempted to clean existing VIP => %v", err) } - } - } - - if !disableVIP { - err := cluster.Network.DeleteIP() - if err != nil { - log.Warnf("Attempted to clean existing VIP => %v", err) - } + err = cluster.Network[i].AddIP() + if err != nil { + log.Warnf("%v", err) + } - err = cluster.Network.AddIP() - if err != nil { - log.Warnf("%v", err) } - // Once we have the VIP running, start the load balancer(s) that bind to the VIP - for x := range c.LoadBalancers { - - if c.LoadBalancers[x].BindToVip == true { - err = VipLB.Add(cluster.Network.IP(), &c.LoadBalancers[x]) - if err != nil { - log.Warnf("Error creating loadbalancer [%s] type [%s] -> error [%s]", c.LoadBalancers[x].Name, c.LoadBalancers[x].Type, err) - } + if c.EnableARP { + // Gratuitous ARP, will broadcast to new MAC <-> IP + err := vip.ARPSendGratuitous(cluster.Network[i].IP(), c.Interface) + if err != nil { + log.Warnf("%v", err) } } } - if c.GratuitousARP == true { - // Gratuitous ARP, will broadcast to new MAC <-> IP - err := vip.ARPSendGratuitous(cluster.Network.IP(), c.Interface) - if err != nil { - log.Warnf("%v", err) - } - } - go func() { - for { - select { - case <-cluster.stop: - log.Info("[LOADBALANCER] Stopping load balancers") + <-cluster.stop - // Stop all load balancers associated with the VIP - err := VipLB.StopAll() + if !disableVIP { + for i := range cluster.Network { + log.Infof("[VIP] Releasing the Virtual IP [%s]", cluster.Network[i].IP()) + err := cluster.Network[i].DeleteIP() if err != nil { log.Warnf("%v", err) } - - // Stop all load balancers associated with the Host - err = nonVipLB.StopAll() - if err != nil { - log.Warnf("%v", err) - } - - if !disableVIP { - - log.Info("[VIP] Releasing the Virtual IP") - err = cluster.Network.DeleteIP() - if err != nil { - log.Warnf("%v", err) - } - } - close(cluster.completed) - return } } + close(cluster.completed) }() log.Infoln("Started Load Balancer and Virtual IP") return nil } + +func (cluster *Cluster) StartVipService(c *kubevip.Config, sm *Manager, bgp *bgp.Server, packetClient *packngo.Client) error { + // use a Go context so we can tell the arp loop code when we + // want to step down + ctxArp, cancelArp := context.WithCancel(context.Background()) + defer cancelArp() + + // use a Go context so we can tell the dns loop code when we + // want to step down + ctxDNS, cancelDNS := context.WithCancel(context.Background()) + defer cancelDNS() + + return cluster.vipService(ctxArp, ctxDNS, c, sm, bgp, packetClient) +} diff --git a/pkg/cluster/state.go b/pkg/cluster/state.go deleted file mode 100644 index e212fbe4..00000000 --- a/pkg/cluster/state.go +++ /dev/null @@ -1,39 +0,0 @@ -package cluster - -import ( - "io" - - "github.com/hashicorp/raft" -) - -// FSM - Finite State Machine for Raft -type FSM struct { -} - -// Apply - TODO -func (fsm FSM) Apply(log *raft.Log) interface{} { - return nil -} - -// Restore - TODO -func (fsm FSM) Restore(snap io.ReadCloser) error { - return nil -} - -// Snapshot - TODO, returns an empty snapshot -func (fsm FSM) Snapshot() (raft.FSMSnapshot, error) { - return Snapshot{}, nil -} - -// Snapshot - -type Snapshot struct { -} - -// Persist - -func (snapshot 
Snapshot) Persist(sink raft.SnapshotSink) error { - return nil -} - -// Release - -func (snapshot Snapshot) Release() { -} diff --git a/pkg/detector/interfaces.go b/pkg/detector/interfaces.go index d04e112a..00b29d69 100644 --- a/pkg/detector/interfaces.go +++ b/pkg/detector/interfaces.go @@ -22,7 +22,7 @@ func FindIPAddress(addrName string) (string, string, error) { } for _, a := range addrs { if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { - if ipnet.IP.To4() != nil { + if ipnet.IP.To16() != nil { address = ipnet.IP.String() // If we're not searching for a specific adapter return the first one if addrName == "" { diff --git a/pkg/packet/bgp.go b/pkg/equinixmetal/bgp.go similarity index 58% rename from pkg/packet/bgp.go rename to pkg/equinixmetal/bgp.go index a8cc5924..95b12a03 100644 --- a/pkg/packet/bgp.go +++ b/pkg/equinixmetal/bgp.go @@ -1,27 +1,31 @@ -package packet +package equinixmetal import ( "fmt" + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/kubevip" "github.com/packethost/packngo" - "github.com/plunder-app/kube-vip/pkg/bgp" - "github.com/plunder-app/kube-vip/pkg/kubevip" log "github.com/sirupsen/logrus" ) -// BGPLookup will use the Packet API functions to populate the BGP information +// BGPLookup will use the Equinix Metal API functions to populate the BGP information func BGPLookup(c *packngo.Client, k *kubevip.Config) error { - - proj := findProject(k.PacketProject, c) - if proj == nil { - return fmt.Errorf("Unable to find Project [%s]", k.PacketProject) + var thisDevice *packngo.Device + if k.MetalProjectID == "" { + proj := findProject(k.MetalProject, c) + if proj == nil { + return fmt.Errorf("Unable to find Project [%s]", k.MetalProject) + } + thisDevice = findSelf(c, proj.ID) + } else { + thisDevice = findSelf(c, k.MetalProjectID) } - thisDevice := findSelf(c, proj.ID) if thisDevice == nil { - return fmt.Errorf("Unable to find local/this device in packet API") + return fmt.Errorf("Unable to find local/this device in Equinix Metal API") } - fmt.Printf("Querying BGP settings for [%s]", thisDevice.Hostname) + log.Infof("Querying BGP settings for [%s]", thisDevice.Hostname) neighbours, _, err := c.Devices.ListBGPNeighbors(thisDevice.ID, &packngo.ListOptions{}) if err != nil { return err @@ -47,8 +51,10 @@ func BGPLookup(c *packngo.Client, k *kubevip.Config) error { // Add the peer(s) for x := range neighbours[0].PeerIps { peer := bgp.Peer{ - Address: neighbours[0].PeerIps[x], - AS: uint32(neighbours[0].PeerAs), + Address: neighbours[0].PeerIps[x], + AS: uint32(neighbours[0].PeerAs), + MultiHop: neighbours[0].Multihop, + Password: neighbours[0].Md5Password, } k.BGPConfig.Peers = append(k.BGPConfig.Peers, peer) } diff --git a/pkg/equinixmetal/eip.go b/pkg/equinixmetal/eip.go new file mode 100644 index 00000000..1db4666a --- /dev/null +++ b/pkg/equinixmetal/eip.go @@ -0,0 +1,65 @@ +package equinixmetal + +import ( + "fmt" + "path" + + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/packethost/packngo" + log "github.com/sirupsen/logrus" +) + +// AttachEIP will use the Equinix Metal APIs to move an EIP and attach to a host +func AttachEIP(c *packngo.Client, k *kubevip.Config, _ string) error { + // Use MetalProjectID if it is defined + projID := k.MetalProjectID + + if projID == "" { + // Fallback to attempting to find the project by name + proj := findProject(k.MetalProject, c) + if proj == nil { + return fmt.Errorf("unable to find Project [%s]", k.MetalProject) + } + + projID = proj.ID + } + + // Prefer Address over VIP + 
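The Equinix Metal integration reads its credentials either from the `PACKET_AUTH_TOKEN` environment variable or from a small JSON provider-config file; the `GetPacketConfig` helper later in this diff unmarshals its `apiKey` and `projectId` keys. A hedged sketch of that flow (the file path and values are placeholders):

```
package main

import (
	"fmt"
	"os"

	"github.com/kube-vip/kube-vip/pkg/equinixmetal"
)

func main() {
	// Illustrative provider-config file using the JSON keys that
	// GetPacketConfig expects: "apiKey" and "projectId".
	cfg := []byte(`{"apiKey": "example-token", "projectId": "example-project-id"}`)
	if err := os.WriteFile("/tmp/metal.json", cfg, 0o600); err != nil {
		panic(err)
	}

	key, project, err := equinixmetal.GetPacketConfig("/tmp/metal.json")
	if err != nil {
		panic(err)
	}

	// Export the key for packngo and record the project ID, mirroring what
	// StartCluster does with ProviderConfig earlier in this diff.
	os.Setenv("PACKET_AUTH_TOKEN", key)
	fmt.Println("using Equinix Metal project:", project)
}
```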
vip := k.Address + if vip == "" { + vip = k.VIP + } + + ips, _, _ := c.ProjectIPs.List(projID, &packngo.ListOptions{}) + for _, ip := range ips { + // Find the device id for our EIP + if ip.Address == vip { + log.Infof("Found EIP ->%s ID -> %s\n", ip.Address, ip.ID) + // If attachments already exist then remove them + if len(ip.Assignments) != 0 { + hrefID := path.Base(ip.Assignments[0].Href) + _, err := c.DeviceIPs.Unassign(hrefID) + if err != nil { + return fmt.Errorf("unable to unassign deviceIP %q: %v", hrefID, err) + } + } + } + } + + // Lookup this server through the Equinix Metal API + thisDevice := findSelf(c, projID) + if thisDevice == nil { + return fmt.Errorf("unable to find local/this device in Equinix Metal API") + } + + // Assign the EIP to this device + log.Infof("Assigning EIP to -> %s\n", thisDevice.Hostname) + _, _, err := c.DeviceIPs.Assign(thisDevice.ID, &packngo.AddressStruct{ + Address: vip, + }) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/equinixmetal/utils.go b/pkg/equinixmetal/utils.go new file mode 100644 index 00000000..6403fd24 --- /dev/null +++ b/pkg/equinixmetal/utils.go @@ -0,0 +1,58 @@ +package equinixmetal + +import ( + "encoding/json" + "fmt" + "os" + + "github.com/packethost/packngo" + log "github.com/sirupsen/logrus" +) + +func findProject(project string, c *packngo.Client) *packngo.Project { + l := &packngo.ListOptions{Includes: []string{project}} + ps, _, err := c.Projects.List(l) + if err != nil { + log.Error(err) + } + for _, p := range ps { + + // Find our project + if p.Name == project { + return &p + } + } + return nil +} + +func findSelf(c *packngo.Client, projectID string) *packngo.Device { + // Go through devices + dev, _, _ := c.Devices.List(projectID, &packngo.ListOptions{}) + for _, d := range dev { + me, _ := os.Hostname() + if me == d.Hostname { + return &d + } + } + return nil +} + +// GetPacketConfig will lookup the configuration from a file path +func GetPacketConfig(providerConfig string) (string, string, error) { + var config struct { + AuthToken string `json:"apiKey"` + ProjectID string `json:"projectId"` + } + // get our token and project + if providerConfig != "" { + configBytes, err := os.ReadFile(providerConfig) + if err != nil { + return "", "", fmt.Errorf("failed to get read configuration file at path %s: %v", providerConfig, err) + } + err = json.Unmarshal(configBytes, &config) + if err != nil { + return "", "", fmt.Errorf("failed to process json of configuration file at path %s: %v", providerConfig, err) + } + } + return config.AuthToken, config.ProjectID, nil +} diff --git a/pkg/etcd/client.go b/pkg/etcd/client.go new file mode 100644 index 00000000..bc0d7540 --- /dev/null +++ b/pkg/etcd/client.go @@ -0,0 +1,26 @@ +package etcd + +import ( + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + + "github.com/kube-vip/kube-vip/pkg/kubevip" +) + +func NewClient(c *kubevip.Config) (*clientv3.Client, error) { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: c.Etcd.CAFile, + CertFile: c.Etcd.ClientCertFile, + KeyFile: c.Etcd.ClientKeyFile, + } + + clientTLS, err := tlsInfo.ClientConfig() + if err != nil { + return nil, err + } + + return clientv3.New(clientv3.Config{ + Endpoints: c.Etcd.Endpoints, + TLS: clientTLS, + }) +} diff --git a/pkg/etcd/election.go b/pkg/etcd/election.go new file mode 100644 index 00000000..a9b5d425 --- /dev/null +++ b/pkg/etcd/election.go @@ -0,0 +1,226 @@ +package etcd + +import ( + "context" + "hash/fnv" + "time" + + "github.com/pkg/errors" + log 
"github.com/sirupsen/logrus" + pb "go.etcd.io/etcd/api/v3/etcdserverpb" + clientv3 "go.etcd.io/etcd/client/v3" + "go.etcd.io/etcd/client/v3/concurrency" +) + +// LeaderElectionConfig allows to configure the leader election params. +type LeaderElectionConfig struct { + // EtcdConfig contains the client to connect to the etcd cluster. + EtcdConfig ClientConfig + + // Name uniquely identifies this leader election. All members of the same election + // should use the same value here. + Name string + + // MemberID identifies uniquely this contestant from other in the leader election. + // It will be converted to an int64 using a hash, so theoretically collisions are possible + // when using a string. If you want to guarantee safety, us MemberUniqueID to specify a unique + // int64 directly. + // If two processes start a leader election using the same MemberID, one of them will + // fail. + MemberID string + + // MemberUniqueID is the int equivalent to MemberID that allows to override the default conversion + // from string to int using hashing. + MemberUniqueID *int64 + + // LeaseDurationSeconds is the duration that non-leader candidates will + // wait to force acquire leadership. + // This is just a request to the etcd server but it's not guaranteed, the server + // might decide to make the duration longer. + LeaseDurationSeconds int64 + + // Callbacks are callbacks that are triggered during certain lifecycle + // events of the LeaderElector + Callbacks LeaderCallbacks +} + +// LeaderCallbacks are callbacks that are triggered during certain +// lifecycle events of the election. +type LeaderCallbacks struct { + // OnStartedLeading is called when this member starts leading. + OnStartedLeading func(context.Context) + // OnStoppedLeading is called when this member stops leading. + OnStoppedLeading func() + // OnNewLeader is called when the client observes a leader that is + // not the previously observed leader. This includes the first observed + // leader when the client starts. + OnNewLeader func(identity string) +} + +// ClientConfig contains the client to connect to the etcd cluster. +type ClientConfig struct { + Client *clientv3.Client +} + +// RunElectionOrDie behaves the same way as RunElection but panics if there is an error. +func RunElectionOrDie(ctx context.Context, config *LeaderElectionConfig) { + if err := RunElection(ctx, config); err != nil { + panic(err) + } +} + +// RunElection starts a client with the provided config or panics. +// RunElection blocks until leader election loop is +// stopped by ctx or it has stopped holding the leader lease. 
+func RunElection(ctx context.Context, config *LeaderElectionConfig) error { + var memberID int64 + if config.MemberUniqueID != nil { + memberID = *config.MemberUniqueID + } else { + h := fnv.New64a() + if _, err := h.Write(append([]byte(config.Name), []byte(config.MemberID)...)); err != nil { + return err + } + memberID = int64(h.Sum64()) + } + + ttl := config.LeaseDurationSeconds + r := &pb.LeaseGrantRequest{TTL: ttl, ID: memberID} + lease, err := clientv3.RetryLeaseClient( + config.EtcdConfig.Client, + ).LeaseGrant(ctx, r) + if err != nil { + return errors.Wrap(err, "creating lease") + } + + leaseID := clientv3.LeaseID(lease.ID) + + s, err := concurrency.NewSession( + config.EtcdConfig.Client, + concurrency.WithTTL(int(lease.TTL)), + concurrency.WithLease(leaseID), + ) + if err != nil { + return err + } + + election := concurrency.NewElection(s, config.Name) + + m := &member{ + client: config.EtcdConfig.Client, + election: election, + callbacks: config.Callbacks, + memberID: config.MemberID, + weAreTheLeader: make(chan struct{}, 1), + leaseTTL: lease.TTL, + } + + go m.tryToBeLeader(ctx) + m.watchLeaderChanges(ctx) + + return nil +} + +type member struct { + key string + client *clientv3.Client + election *concurrency.Election + isLeader bool + currentLeaderKey string + callbacks LeaderCallbacks + memberID string + weAreTheLeader chan struct{} + leaseTTL int64 +} + +func (m *member) watchLeaderChanges(ctx context.Context) { + observeCtx, observeCancel := context.WithCancel(ctx) + defer observeCancel() + changes := m.election.Observe(observeCtx) + +watcher: + for { + select { + case <-ctx.Done(): + break watcher + case <-m.weAreTheLeader: + + m.isLeader = true + m.key = m.election.Key() // by this time, this should already be set, since Campaign has already returned + log.Debugf("[%s] Marking self as leader with key %s\n", m.memberID, m.key) + case response := <-changes: + log.Debugf("[%s] Leader Changes: %+v\n", m.memberID, response) + if len(response.Kvs) == 0 { + // There is a race condition where, just after we stop being the leader, + // if there are no more leaders we might get a response with no key-values + // just before the response channel is closed or the context is canceled. + // In that case, just continue and let one of those two things happen. + continue + } + newLeaderKey := response.Kvs[0].Key + if m.isLeader && m.key != string(newLeaderKey) { + // We stopped being the leader + + // Exit the loop so that we cancel the observe context and stop watching + // for new leaders. That will close the channel and make this function exit, + // which also makes the routine finish and RunElection return + break watcher + } + + if m.currentLeaderKey != string(newLeaderKey) { + // we observed a leader, this could be us or someone else + m.currentLeaderKey = string(newLeaderKey) + m.callbacks.OnNewLeader(string(response.Kvs[0].Value)) + } + } + } + + // If we are here, either we have stopped being the leader or we lost the watcher. + // Make sure we call OnStoppedLeading if we were the leader.
+ if m.isLeader { + m.callbacks.OnStoppedLeading() + } + + log.Debugf("[%s] Exiting watcher\n", m.memberID) +} + +func (m *member) tryToBeLeader(ctx context.Context) { + if err := m.election.Campaign(ctx, m.memberID); err != nil { + log.Errorf("Failed trying to become the leader: %s", err) + // Resign just in case we acquired leadership just before failing + if err := m.election.Resign(m.client.Ctx()); err != nil { + log.Warnf("Failed to resign after we failed becoming the leader, this might not be a problem if we were never the leader: %s", err) + } + return + // TODO: what to do here? + // We probably want watchLeaderChanges to exit as well, since Run + // is expecting us to try to become the leader, but if we are here, + // we won't. So if we don't panic, we need to signal it somehow + } + + // Inform the observer that we are the leader as soon as possible, + // so it can detect if we stop being it + m.weAreTheLeader <- struct{}{} + + // Once we are the leader, start the routine to resign if context is canceled + go m.resignOnCancel(ctx) + + // After becoming the leader, we wait for at least a lease TTL to wait for + // the previous leader to detect the new leadership (if there was one) and + // stop its processes + // TODO: is this too cautious? + log.Debugf("[%s] Waiting %d seconds before running OnStartedLeading", m.memberID, m.leaseTTL) + time.Sleep(time.Second * time.Duration(m.leaseTTL)) + + // We are the leader, execute our code + m.callbacks.OnStartedLeading(ctx) + + // Here the routine dies if OnStartedLeading doesn't block, there is nothing else to do +} + +func (m *member) resignOnCancel(ctx context.Context) { + <-ctx.Done() + if err := m.election.Resign(m.client.Ctx()); err != nil { + log.Errorf("Failed to resign after the context was canceled: %s", err) + } +} diff --git a/pkg/etcd/election_test.go b/pkg/etcd/election_test.go new file mode 100644 index 00000000..1ecf0e54 --- /dev/null +++ b/pkg/etcd/election_test.go @@ -0,0 +1,169 @@ +//go:build integration +// +build integration + +package etcd_test + +import ( + "context" + "log" + "math/rand" + "sync" + "testing" + "time" + + "github.com/kube-vip/kube-vip/pkg/etcd" + . 
"github.com/onsi/gomega" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" +) + +func TestRunElectionWithMemberIDCollision(t *testing.T) { + t.Parallel() + g := NewWithT(t) + ctx := context.Background() + cli := client(g) + defer cli.Close() + + electionName := randomElectionNameForTest("memberIDConflict") + log.Printf("Election name %s\n", electionName) + memberCtx, cancelMember1 := context.WithCancel(ctx) + config := &etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{ + Client: cli, + }, + Name: electionName, + MemberID: "my-host", + LeaseDurationSeconds: 1, + Callbacks: etcd.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + log.Println("I'm the leader!!!!") + log.Println("Renouncing as leader by canceling context") + cancelMember1() + }, + OnNewLeader: func(identity string) { + log.Printf("New leader: %s\n", identity) + }, + OnStoppedLeading: func() { + log.Println("I'm not the leader anymore") + }, + }, + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + g.Expect(etcd.RunElection(memberCtx, config)).To(Succeed()) + }() + + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 50) // make sure the first one becomes leader + g.Expect(etcd.RunElection(ctx, config)).Should(MatchError(ContainSubstring("creating lease"))) + }() + + wg.Wait() +} + +func TestRunElectionWithTwoMembersAndReelection(t *testing.T) { + t.Parallel() + g := NewWithT(t) + ctx := context.Background() + cli := client(g) + defer cli.Close() + + cliMember1 := client(g) + defer cliMember1.Close() + + electionName := randomElectionNameForTest("steppingDown") + configBase := etcd.LeaderElectionConfig{ + EtcdConfig: etcd.ClientConfig{ + Client: cli, + }, + Name: electionName, + LeaseDurationSeconds: 1, + } + + member1Ctx, _ := context.WithCancel(ctx) + member2Ctx, cancelMember2 := context.WithCancel(ctx) + + config1 := configBase + config1.EtcdConfig.Client = cliMember1 + config1.MemberID = "my-host" + uniqueID := rand.Int63() + config1.MemberUniqueID = &uniqueID + config1.Callbacks = baseCallbacksForName(config1.MemberID) + config1.Callbacks.OnStartedLeading = func(_ context.Context) { + log.Println("I'm my-host, the new leader!!!!") + log.Println("Loosing the leadership on purpose by stopping renewing the lease") + g.Expect(cliMember1.Lease.Close()).To(Succeed()) + log.Println("Member1 leases closed") + } + + config2 := configBase + config2.MemberID = "my-other-host" + config2.Callbacks = baseCallbacksForName(config2.MemberID) + config2.Callbacks.OnStartedLeading = func(_ context.Context) { + log.Println("I'm my-other-host, the new leader!!!!") + log.Println("Renouncing as leader by canceling context") + cancelMember2() + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + g.Expect(etcd.RunElection(member1Ctx, &config1)).To(Succeed()) + log.Println("Member1 routine done") + }() + + go func() { + defer wg.Done() + time.Sleep(time.Millisecond * 50) // Make sure member1 becomes leader + g.Expect(etcd.RunElection(member2Ctx, &config2)).To(Succeed()) + log.Println("Member2 routine done") + }() + + wg.Wait() +} + +func baseCallbacksForName(name string) etcd.LeaderCallbacks { + return etcd.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + log.Printf("[%s] I'm the new leader!!!!\n", name) + }, + OnNewLeader: func(identity string) { + log.Printf("[%s] New leader: %s\n", name, identity) + }, + OnStoppedLeading: func() { + log.Printf("[%s] I'm not the leader anymore\n", name) + }, + } +} + +func 
randomElectionNameForTest(name string) string { + return name + "-" + randomString(6) +} + +const charSet = "0123456789abcdefghijklmnopqrstuvwxyz" + +var rnd = rand.New(rand.NewSource(time.Now().UnixNano())) + +func randomString(n int) string { + result := make([]byte, n) + for i := range result { + result[i] = charSet[rnd.Intn(len(charSet))] + } + return string(result) +} + +func client(g Gomega) *clientv3.Client { + c, err := clientv3.New(clientv3.Config{ + Endpoints: []string{"localhost:2379"}, + Logger: zap.NewNop(), + }) + g.Expect(err).NotTo(HaveOccurred()) + return c +} diff --git a/pkg/etcd/etcd_suite_test.go b/pkg/etcd/etcd_suite_test.go new file mode 100644 index 00000000..0842d845 --- /dev/null +++ b/pkg/etcd/etcd_suite_test.go @@ -0,0 +1,153 @@ +//go:build integration +// +build integration + +package etcd_test + +import ( + "context" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "testing" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + etcdVersion = "v3.5.9" + etcdBinDir = "bin" + etcdBinPath = etcdBinDir + "/etcd" + downloadURL = "https://storage.googleapis.com/etcd" + tmpDownloadFile = "etcd.tar.gz" + pidFile = "etcd.pid" +) + +func TestMain(m *testing.M) { + logrus.SetLevel(logrus.DebugLevel) + ctx := context.Background() + expectSuccess(startEtcd(ctx), "starting etcd") + + os.Exit(runTestsWithCleanup(m, func() { + expectSuccess(stopEtcd(), "stopping etcd") + })) +} + +func runTestsWithCleanup(m *testing.M, cleanup func()) int { + defer cleanup() + return m.Run() +} + +func expectSuccess(err error, msg string) { + if err != nil { + log.Fatalf("%s: %s\n", msg, err) + } +} + +func startEtcd(ctx context.Context) error { + if _, err := os.Stat(pidFile); err == nil { + log.Println("Etcd already running, reusing") + return nil + } + + etcdPath, err := installEtcd(ctx) + if err != nil { + return errors.Wrap(err, "installing etcd for tests") + } + + etcdCmd := exec.Command(etcdPath, "--data-dir", "./etcd-data") + if os.Getenv("ETCD_SERVER_LOGS") == "true" { + log.Println("Enabling etcd server logs") + etcdCmd.Stdout = os.Stdout + etcdCmd.Stderr = os.Stderr + } + log.Println("Starting etcd") + if err := etcdCmd.Start(); err != nil { + return errors.Wrap(err, "starting etcd for tests") + } + + if err := os.WriteFile(pidFile, []byte(strconv.Itoa(etcdCmd.Process.Pid)), 0o600); err != nil { + return err + } + + log.Println("Waiting for etcd to be up") + time.Sleep(time.Second) + + return nil +} + +func installEtcd(ctx context.Context) (string, error) { + projectRoot, err := filepath.Abs("../../") + if err != nil { + return "", err + } + binDir := filepath.Join(projectRoot, etcdBinDir) + etcdPath := filepath.Join(projectRoot, etcdBinPath) + + if _, err := os.Stat(etcdPath); err == nil { + log.Println("Etcd already installed, skipping") + return etcdPath, nil + } + + if err := os.MkdirAll(binDir, 0o755); err != nil { + return "", err + } + + download := fmt.Sprintf("%s/%s/etcd-%s-linux-amd64.tar.gz", downloadURL, etcdVersion, etcdVersion) + + // Hacky to shell out to bash, but it simplifies this code a lot + cmd := fmt.Sprintf("curl -sL %s | tar -xzvf - -C %s --strip-components=1", download, binDir) + out, err := exec.CommandContext(ctx, "bash", "-c", cmd).CombinedOutput() + if err != nil { + return "", errors.Wrapf(err, "downloading etcd: %s", string(out)) + } + + return etcdPath, nil +} + +func stopEtcd() error { + if os.Getenv("REUSE_ETCD") == "true" { + log.Println("REUSE_ETCD=true, leaving etcd running") + return nil + } + + if _, err := 
os.Stat(pidFile); os.IsNotExist(err) { + log.Println("Etcd pid file doesn't exit, skipping cleanup") + return nil + } + + dat, err := os.ReadFile(pidFile) + if err != nil { + return err + } + pid, err := strconv.Atoi(string(dat)) + if err != nil { + return err + } + + etcdProcess, err := os.FindProcess(pid) + if err != nil { + return err + } + + log.Println("Stopping etcd") + if err := etcdProcess.Kill(); err != nil { + return errors.Wrap(err, "Failed stopping etcd") + } + + log.Println("Deleting etcd data") + if err := os.RemoveAll("./etcd-data"); err != nil { + return errors.Wrap(err, "deleting etcd data") + } + + log.Println("Deleting etcd pid file") + if err := os.RemoveAll(pidFile); err != nil { + return errors.Wrap(err, "deleting pid file") + } + + return nil +} diff --git a/pkg/iptables/iptables.go b/pkg/iptables/iptables.go new file mode 100644 index 00000000..adb1ca05 --- /dev/null +++ b/pkg/iptables/iptables.go @@ -0,0 +1,737 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "bytes" + "fmt" + "io" + "net" + "os/exec" + "regexp" + "strconv" + "strings" + "syscall" +) + +// Adds the output of stderr to exec.ExitError +type Error struct { + exec.ExitError + cmd exec.Cmd + msg string + exitStatus *int //for overriding +} + +func (e *Error) ExitStatus() int { + if e.exitStatus != nil { + return *e.exitStatus + } + return e.Sys().(syscall.WaitStatus).ExitStatus() +} + +func (e *Error) Error() string { + return fmt.Sprintf("running %v: exit status %v: %v", e.cmd.Args, e.ExitStatus(), e.msg) +} + +// IsNotExist returns true if the error is due to the chain or rule not existing +func (e *Error) IsNotExist() bool { + if e.ExitStatus() != 1 { + return false + } + msgNoRuleExist := "Bad rule (does a matching rule exist in that chain?).\n" + msgNoChainExist := "No chain/target/match by that name.\n" + return strings.Contains(e.msg, msgNoRuleExist) || strings.Contains(e.msg, msgNoChainExist) +} + +// Protocol to differentiate between IPv4 and IPv6 +type Protocol byte + +const ( + ProtocolIPv4 Protocol = iota + ProtocolIPv6 +) + +const ( + TableFilter = "filter" + ChainInput = "INPUT" +) + +type IPTables struct { + path string + proto Protocol + hasCheck bool + hasWait bool + waitSupportSecond bool + hasRandomFully bool + v1 int + v2 int + v3 int + mode string // the underlying iptables operating mode, e.g. nf_tables + timeout int // time to wait for the iptables lock, default waits forever + + nftables bool +} + +// Stat represents a structured statistic entry. 
+type Stat struct { + Packets uint64 `json:"pkts"` + Bytes uint64 `json:"bytes"` + Target string `json:"target"` + Protocol string `json:"prot"` + Opt string `json:"opt"` + Input string `json:"in"` + Output string `json:"out"` + Source *net.IPNet `json:"source"` + Destination *net.IPNet `json:"destination"` + Options string `json:"options"` +} + +type Option func(*IPTables) + +func IPFamily(proto Protocol) Option { + return func(ipt *IPTables) { + ipt.proto = proto + } +} + +func Timeout(timeout int) Option { + return func(ipt *IPTables) { + ipt.timeout = timeout + } +} + +func EnableNFTables(enable bool) Option { + return func(ipt *IPTables) { + ipt.nftables = enable + } +} + +// New creates a new IPTables configured with the options passed as parameter. +// For backwards compatibility, by default always uses IPv4 and timeout 0. +// i.e. you can create an IPv6 IPTables using a timeout of 5 seconds passing +// the IPFamily and Timeout options as follow: +// +// ip6t := New(IPFamily(ProtocolIPv6), Timeout(5)) +func New(opts ...Option) (*IPTables, error) { + + ipt := &IPTables{ + proto: ProtocolIPv4, + timeout: 0, + } + + for _, opt := range opts { + opt(ipt) + } + + path, err := exec.LookPath(getIptablesCommand(ipt.proto, ipt.nftables)) + if err != nil { + return nil, err + } + ipt.path = path + + vstring, err := getIptablesVersionString(path) + if err != nil { + return nil, fmt.Errorf("could not get iptables version: %v", err) + } + v1, v2, v3, mode, err := extractIptablesVersion(vstring) + if err != nil { + return nil, fmt.Errorf("failed to extract iptables version from [%s]: %v", vstring, err) + } + ipt.v1 = v1 + ipt.v2 = v2 + ipt.v3 = v3 + ipt.mode = mode + + checkPresent, waitPresent, waitSupportSecond, randomFullyPresent := getIptablesCommandSupport(v1, v2, v3) + ipt.hasCheck = checkPresent + ipt.hasWait = waitPresent + ipt.waitSupportSecond = waitSupportSecond + ipt.hasRandomFully = randomFullyPresent + + return ipt, nil +} + +// New creates a new IPTables for the given proto. +// The proto will determine which command is used, either "iptables" or "ip6tables". +func NewWithProtocol(proto Protocol) (*IPTables, error) { + return New(IPFamily(proto), Timeout(0)) +} + +// Proto returns the protocol used by this IPTables. +func (ipt *IPTables) Proto() Protocol { + return ipt.proto +} + +// Exists checks if given rulespec in specified table/chain exists +func (ipt *IPTables) Exists(table, chain string, rulespec ...string) (bool, error) { + if !ipt.hasCheck { + return ipt.existsForOldIptables(table, chain, rulespec) + + } + cmd := append([]string{"-t", table, "-C", chain}, rulespec...) + err := ipt.run(cmd...) + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Insert inserts rulespec to specified table/chain (in specified pos) +func (ipt *IPTables) Insert(table, chain string, pos int, rulespec ...string) error { + cmd := append([]string{"-t", table, "-I", chain, strconv.Itoa(pos)}, rulespec...) + return ipt.run(cmd...) +} + +// InsertUnique acts like Insert except that it won't insert a duplicate (no matter the position in the chain) +func (ipt *IPTables) InsertUnique(table, chain string, pos int, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Insert(table, chain, pos, rulespec...) 
+ } + + return nil +} + +// Append appends rulespec to specified table/chain +func (ipt *IPTables) Append(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-A", chain}, rulespec...) + return ipt.run(cmd...) +} + +// AppendUnique acts like Append except that it won't add a duplicate +func (ipt *IPTables) AppendUnique(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err != nil { + return err + } + + if !exists { + return ipt.Append(table, chain, rulespec...) + } + + return nil +} + +// Delete removes rulespec in specified table/chain +func (ipt *IPTables) Delete(table, chain string, rulespec ...string) error { + cmd := append([]string{"-t", table, "-D", chain}, rulespec...) + return ipt.run(cmd...) +} + +func (ipt *IPTables) DeleteIfExists(table, chain string, rulespec ...string) error { + exists, err := ipt.Exists(table, chain, rulespec...) + if err == nil && exists { + err = ipt.Delete(table, chain, rulespec...) + } + return err +} + +// List rules in specified table/chain +func (ipt *IPTables) ListByID(table, chain string, id int) (string, error) { + args := []string{"-t", table, "-S", chain, strconv.Itoa(id)} + rule, err := ipt.executeList(args) + if err != nil { + return "", err + } + return rule[0], nil +} + +// List rules in specified table/chain +func (ipt *IPTables) List(table, chain string) ([]string, error) { + args := []string{"-t", table, "-S", chain} + return ipt.executeList(args) +} + +// List rules (with counters) in specified table/chain +func (ipt *IPTables) ListWithCounters(table, chain string) ([]string, error) { + args := []string{"-t", table, "-v", "-S", chain} + return ipt.executeList(args) +} + +// ListChains returns a slice containing the name of each chain in the specified table. +func (ipt *IPTables) ListChains(table string) ([]string, error) { + args := []string{"-t", table, "-S"} + + result, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + // Iterate over rules to find all default (-P) and user-specified (-N) chains. + // Chains definition always come before rules. 
+ // Format is the following: + // -P OUTPUT ACCEPT + // -N Custom + var chains []string + for _, val := range result { + if strings.HasPrefix(val, "-P") || strings.HasPrefix(val, "-N") { + chains = append(chains, strings.Fields(val)[1]) + } else { + break + } + } + return chains, nil +} + +// '-S' is fine with non existing rule index as long as the chain exists +// therefore pass index 1 to reduce overhead for large chains +func (ipt *IPTables) ChainExists(table, chain string) (bool, error) { + err := ipt.run("-t", table, "-S", chain, "1") + eerr, eok := err.(*Error) + switch { + case err == nil: + return true, nil + case eok && eerr.ExitStatus() == 1: + return false, nil + default: + return false, err + } +} + +// Stats lists rules including the byte and packet counts +func (ipt *IPTables) Stats(table, chain string) ([][]string, error) { + args := []string{"-t", table, "-L", chain, "-n", "-v", "-x"} + lines, err := ipt.executeList(args) + if err != nil { + return nil, err + } + + appendSubnet := func(addr string) string { + if strings.IndexByte(addr, byte('/')) < 0 { + if strings.IndexByte(addr, '.') < 0 { + return addr + "/128" + } + return addr + "/32" + } + return addr + } + + ipv6 := ipt.proto == ProtocolIPv6 + + rows := [][]string{} + for i, line := range lines { + // Skip over chain name and field header + if i < 2 { + continue + } + + // Fields: + // 0=pkts 1=bytes 2=target 3=prot 4=opt 5=in 6=out 7=source 8=destination 9=options + line = strings.TrimSpace(line) + fields := strings.Fields(line) + + // The ip6tables verbose output cannot be naively split due to the default "opt" + // field containing 2 single spaces. + if ipv6 { + // Check if field 6 is "opt" or "source" address + dest := fields[6] + ip, _, _ := net.ParseCIDR(dest) + if ip == nil { + ip = net.ParseIP(dest) + } + + // If we detected a CIDR or IP, the "opt" field is empty.. insert it. + if ip != nil { + f := []string{} + f = append(f, fields[:4]...) + f = append(f, " ") // Empty "opt" field for ip6tables + f = append(f, fields[4:]...) + fields = f + } + } + + // Adjust "source" and "destination" to include netmask, to match regular + // List output + fields[7] = appendSubnet(fields[7]) + fields[8] = appendSubnet(fields[8]) + + // Combine "options" fields 9... into a single space-delimited field. + options := fields[9:] + fields = fields[:9] + fields = append(fields, strings.Join(options, " ")) + rows = append(rows, fields) + } + return rows, nil +} + +// ParseStat parses a single statistic row into a Stat struct. The input should +// be a string slice that is returned from calling the Stat method. 
+func (ipt *IPTables) ParseStat(stat []string) (parsed Stat, err error) { + // For forward-compatibility, expect at least 10 fields in the stat + if len(stat) < 10 { + return parsed, fmt.Errorf("stat contained fewer fields than expected") + } + + // Convert the fields that are not plain strings + parsed.Packets, err = strconv.ParseUint(stat[0], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse packets") + } + parsed.Bytes, err = strconv.ParseUint(stat[1], 0, 64) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse bytes") + } + _, parsed.Source, err = net.ParseCIDR(stat[7]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse source") + } + _, parsed.Destination, err = net.ParseCIDR(stat[8]) + if err != nil { + return parsed, fmt.Errorf(err.Error(), "could not parse destination") + } + + // Put the fields that are strings + parsed.Target = stat[2] + parsed.Protocol = stat[3] + parsed.Opt = stat[4] + parsed.Input = stat[5] + parsed.Output = stat[6] + parsed.Options = stat[9] + + return parsed, nil +} + +// StructuredStats returns statistics as structured data which may be further +// parsed and marshaled. +func (ipt *IPTables) StructuredStats(table, chain string) ([]Stat, error) { + rawStats, err := ipt.Stats(table, chain) + if err != nil { + return nil, err + } + + structStats := []Stat{} + for _, rawStat := range rawStats { + stat, err := ipt.ParseStat(rawStat) + if err != nil { + return nil, err + } + structStats = append(structStats, stat) + } + + return structStats, nil +} + +func (ipt *IPTables) executeList(args []string) ([]string, error) { + var stdout bytes.Buffer + if err := ipt.runWithOutput(args, &stdout); err != nil { + return nil, err + } + + rules := strings.Split(stdout.String(), "\n") + + // strip trailing newline + if len(rules) > 0 && rules[len(rules)-1] == "" { + rules = rules[:len(rules)-1] + } + + for i, rule := range rules { + rules[i] = filterRuleOutput(rule) + } + + return rules, nil +} + +// NewChain creates a new chain in the specified table. +// If the chain already exists, it will result in an error. +func (ipt *IPTables) NewChain(table, chain string) error { + return ipt.run("-t", table, "-N", chain) +} + +const existsErr = 1 + +// ClearChain flushed (deletes all rules) in the specified table/chain. +// If the chain does not exist, a new one will be created +func (ipt *IPTables) ClearChain(table, chain string) error { + err := ipt.NewChain(table, chain) + + eerr, eok := err.(*Error) + switch { + case err == nil: + return nil + case eok && eerr.ExitStatus() == existsErr: + // chain already exists. Flush (clear) it. + return ipt.run("-t", table, "-F", chain) + default: + return err + } +} + +// RenameChain renames the old chain to the new one. +func (ipt *IPTables) RenameChain(table, oldChain, newChain string) error { + return ipt.run("-t", table, "-E", oldChain, newChain) +} + +// DeleteChain deletes the chain in the specified table. 
+// The chain must be empty +func (ipt *IPTables) DeleteChain(table, chain string) error { + return ipt.run("-t", table, "-X", chain) +} + +func (ipt *IPTables) ClearAndDeleteChain(table, chain string) error { + exists, err := ipt.ChainExists(table, chain) + if err != nil || !exists { + return err + } + err = ipt.run("-t", table, "-F", chain) + if err == nil { + err = ipt.run("-t", table, "-X", chain) + } + return err +} + +func (ipt *IPTables) ClearAll() error { + return ipt.run("-F") +} + +func (ipt *IPTables) DeleteAll() error { + return ipt.run("-X") +} + +// ChangePolicy changes policy on chain to target +func (ipt *IPTables) ChangePolicy(table, chain, target string) error { + return ipt.run("-t", table, "-P", chain, target) +} + +// Check if the underlying iptables command supports the --random-fully flag +func (ipt *IPTables) HasRandomFully() bool { + return ipt.hasRandomFully +} + +// Return version components of the underlying iptables command +func (ipt *IPTables) GetIptablesVersion() (int, int, int) { + return ipt.v1, ipt.v2, ipt.v3 +} + +// run runs an iptables command with the given arguments, ignoring +// any stdout output +func (ipt *IPTables) run(args ...string) error { + return ipt.runWithOutput(args, nil) +} + +// runWithOutput runs an iptables command with the given arguments, +// writing any stdout output to the given writer +func (ipt *IPTables) runWithOutput(args []string, stdout io.Writer) error { + args = append([]string{ipt.path}, args...) + if ipt.hasWait { + args = append(args, "--wait") + if ipt.timeout != 0 && ipt.waitSupportSecond { + args = append(args, strconv.Itoa(ipt.timeout)) + } + } else { + fmu, err := newXtablesFileLock() + if err != nil { + return err + } + ul, err := fmu.tryLock() + if err != nil { + syscall.Close(fmu.fd) + return err + } + defer func() { + _ = ul.Unlock() + }() + } + + var stderr bytes.Buffer + cmd := exec.Cmd{ + Path: ipt.path, + Args: args, + Stdout: stdout, + Stderr: &stderr, + } + + if err := cmd.Run(); err != nil { + switch e := err.(type) { + case *exec.ExitError: + return &Error{*e, cmd, stderr.String(), nil} + default: + return err + } + } + + return nil +} + +// getIptablesCommand returns the correct command for the given protocol, either "iptables" or "ip6tables". +func getIptablesCommand(proto Protocol, nftables bool) string { + if proto == ProtocolIPv6 { + if nftables { + return "ip6tables-nft" + } + return "ip6tables-legacy" + } + + if nftables { + return "iptables-nft" + } + return "iptables-legacy" +} + +// Checks if iptables has the "-C" and "--wait" flag +func getIptablesCommandSupport(v1 int, v2 int, v3 int) (bool, bool, bool, bool) { + return iptablesHasCheckCommand(v1, v2, v3), iptablesHasWaitCommand(v1, v2, v3), iptablesWaitSupportSecond(v1, v2), iptablesHasRandomFully(v1, v2, v3) +} + +// getIptablesVersion returns the first three components of the iptables version +// and the operating mode (e.g. nf_tables or legacy) +// e.g. 
"iptables v1.3.66" would return (1, 3, 66, legacy, nil) +func extractIptablesVersion(str string) (int, int, int, string, error) { + versionMatcher := regexp.MustCompile(`v([0-9]+)\.([0-9]+)\.([0-9]+)(?:\s+\((\w+))?`) + result := versionMatcher.FindStringSubmatch(str) + if result == nil { + return 0, 0, 0, "", fmt.Errorf("no iptables version found in string: %s", str) + } + + v1, err := strconv.Atoi(result[1]) + if err != nil { + return 0, 0, 0, "", err + } + + v2, err := strconv.Atoi(result[2]) + if err != nil { + return 0, 0, 0, "", err + } + + v3, err := strconv.Atoi(result[3]) + if err != nil { + return 0, 0, 0, "", err + } + + mode := "legacy" + if result[4] != "" { + mode = result[4] + } + return v1, v2, v3, mode, nil +} + +// Runs "iptables --version" to get the version string +func getIptablesVersionString(path string) (string, error) { + cmd := exec.Command(path, "--version") + var out bytes.Buffer + cmd.Stdout = &out + err := cmd.Run() + if err != nil { + return "", err + } + return out.String(), nil +} + +// Checks if an iptables version is after 1.4.11, when --check was added +func iptablesHasCheckCommand(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 11 { + return true + } + return false +} + +// Checks if an iptables version is after 1.4.20, when --wait was added +func iptablesHasWaitCommand(v1 int, v2 int, v3 int) bool { //nolint + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 4 { + return true + } + if v1 == 1 && v2 == 4 && v3 >= 20 { + return true + } + return false +} + +// Checks if an iptablse version is after 1.6.0, when --wait support second +func iptablesWaitSupportSecond(v1 int, v2 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 >= 6 { + return true + } + return false +} + +// Checks if an iptables version is after 1.6.2, when --random-fully was added +func iptablesHasRandomFully(v1 int, v2 int, v3 int) bool { + if v1 > 1 { + return true + } + if v1 == 1 && v2 > 6 { + return true + } + if v1 == 1 && v2 == 6 && v3 >= 2 { + return true + } + return false +} + +// Checks if a rule specification exists for a table +func (ipt *IPTables) existsForOldIptables(table, chain string, rulespec []string) (bool, error) { + rs := strings.Join(append([]string{"-A", chain}, rulespec...), " ") + args := []string{"-t", table, "-S"} + var stdout bytes.Buffer + err := ipt.runWithOutput(args, &stdout) + if err != nil { + return false, err + } + return strings.Contains(stdout.String(), rs), nil +} + +// counterRegex is the regex used to detect nftables counter format +var counterRegex = regexp.MustCompile(`^\[([0-9]+):([0-9]+)\] `) + +// filterRuleOutput works around some inconsistencies in output. +// For example, when iptables is in legacy vs. nftables mode, it produces +// different results. 
+func filterRuleOutput(rule string) string { + out := rule + + // work around an output difference in nftables mode where counters + // are output in iptables-save format, rather than iptables -S format + // The string begins with "[0:0]" + // + // Fixes #49 + if groups := counterRegex.FindStringSubmatch(out); groups != nil { + // drop the brackets + out = out[len(groups[0]):] + out = fmt.Sprintf("%s -c %s %s", out, groups[1], groups[2]) + } + + return out +} + +func GetIPTablesRuleSpecification(rule, specification string) string { + parts := strings.Split(rule, " ") + for i, part := range parts { + if part == specification && i+1 < len(parts) { + return parts[i+1] + } + } + + return "" +} diff --git a/pkg/iptables/lock.go b/pkg/iptables/lock.go new file mode 100644 index 00000000..11c08aac --- /dev/null +++ b/pkg/iptables/lock.go @@ -0,0 +1,84 @@ +// Copyright 2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iptables + +import ( + "os" + "sync" + "syscall" +) + +const ( + // In earlier versions of iptables, the xtables lock was implemented + // via a Unix socket, but now flock is used via this lockfile: + // http://git.netfilter.org/iptables/commit/?id=aa562a660d1555b13cffbac1e744033e91f82707 + // Note the LSB-conforming "/run" directory does not exist on old + // distributions, so assume "/var" is symlinked + xtablesLockFilePath = "/var/run/xtables.lock" + + defaultFilePerm = 0600 +) + +type Unlocker interface { + Unlock() error +} + +type nopUnlocker struct{} + +func (n nopUnlocker) Unlock() error { return nil } + +type fileLock struct { + // mu is used to protect against concurrent invocations from within this process + mu sync.Mutex + fd int +} + +// tryLock takes an exclusive lock on the xtables lock file without blocking. +// This is best-effort only: if the exclusive lock would block (i.e. because +// another process already holds it), no error is returned. Otherwise, any +// error encountered during the locking operation is returned. +// The returned Unlocker should be used to release the lock when the caller is +// done invoking iptables commands. +func (l *fileLock) tryLock() (Unlocker, error) { + l.mu.Lock() + err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) + switch err { + case syscall.EWOULDBLOCK: + l.mu.Unlock() + return nopUnlocker{}, nil + case nil: + return l, nil + default: + l.mu.Unlock() + return nil, err + } +} + +// Unlock closes the underlying file, which implicitly unlocks it as well. It +// also unlocks the associated mutex. 
+func (l *fileLock) Unlock() error { + defer l.mu.Unlock() + return syscall.Close(l.fd) +} + +// newXtablesFileLock opens a new lock on the xtables lockfile without +// acquiring the lock +func newXtablesFileLock() (*fileLock, error) { + fd, err := syscall.Open(xtablesLockFilePath, os.O_CREATE, defaultFilePerm) + if err != nil { + return nil, err + } + return &fileLock{fd: fd}, nil +} diff --git a/pkg/k8s/client.go b/pkg/k8s/client.go new file mode 100644 index 00000000..a03fe59a --- /dev/null +++ b/pkg/k8s/client.go @@ -0,0 +1,110 @@ +package k8s + +import ( + "crypto/tls" + "fmt" + "net" + "time" + + log "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// NewClientset takes an optional configPath and creates a new clientset. +// If the configPath is not specified, and inCluster is true, then an +// InClusterConfig is used. +// Also takes a hostname which allow for overriding the config's hostname +// before generating a client. +func NewClientset(configPath string, inCluster bool, hostname string) (*kubernetes.Clientset, error) { + return newClientset(configPath, inCluster, hostname, time.Second*10) +} + +func newClientset(configPath string, inCluster bool, hostname string, timeout time.Duration) (*kubernetes.Clientset, error) { + config, err := restConfig(configPath, inCluster, timeout) + if err != nil { + panic(err.Error()) + } + + if len(hostname) > 0 { + config.Host = hostname + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) + } + return clientset, nil +} + +func restConfig(kubeconfig string, inCluster bool, timeout time.Duration) (*rest.Config, error) { + cfg, err := rest.InClusterConfig() + if err != nil { + log.Debugf("[k8s client] we try the incluster first, this error [%v] can safely be ignored", err) + } + + if kubeconfig != "" && !inCluster { + cfg, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + } + + if err != nil { + return nil, err + } + + // Override some of the defaults allowing a little bit more flexibility speaking with the API server + // these should hopefully be redundant, however issues will still be logged. 
+ cfg.QPS = 100 + cfg.Burst = 250 + cfg.Timeout = timeout + return cfg, nil +} + +func findAddressFromRemoteCert(address string) ([]net.IP, error) { + + // TODO: should we care at this point, probably not as we just want the certificates + conf := &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: true, //nolint + } + d := &net.Dialer{ + Timeout: time.Duration(3) * time.Second, + } + + // Create the TCP connection + conn, err := tls.DialWithDialer(d, "tcp", address, conf) + if err != nil { + return nil, err + } + + defer conn.Close() + // Grab the certificactes + certs := conn.ConnectionState().PeerCertificates + if len(certs) > 1 { + return nil, fmt.Errorf("[k8s client] not designed to recive multiple certs from API server") + } + + return certs[0].IPAddresses, nil +} + +func FindWorkingKubernetesAddress(configPath string, inCluster bool) (*kubernetes.Clientset, error) { + // check with loopback, and retrieve its certificate + ips, err := findAddressFromRemoteCert("127.0.0.1:6443") + if err != nil { + return nil, err + } + for x := range ips { + log.Debugf("[k8s client] checking with IP address [%s]", ips[x].String()) + + k, err := newClientset(configPath, inCluster, ips[x].String()+":6443", time.Second*2) + if err != nil { + log.Info(err) + } + _, err = k.DiscoveryClient.ServerVersion() + if err == nil { + log.Infof("[k8s client] working with IP address [%s]", ips[x].String()) + return NewClientset(configPath, inCluster, ips[x].String()+":6443") + } + } + return nil, fmt.Errorf("unable to find a working address for the local API server [%v]", err) +} diff --git a/pkg/k8s/client_test.go b/pkg/k8s/client_test.go new file mode 100644 index 00000000..9180f99e --- /dev/null +++ b/pkg/k8s/client_test.go @@ -0,0 +1,70 @@ +package k8s + +//"192.168.0.174:6443" + +// func Test_findAddressFromRemoteCert(t *testing.T) { +// type args struct { +// address string +// } +// tests := []struct { +// name string +// args args +// want []net.IP +// wantErr bool +// }{ +// { +// name: "test server", +// args: args{address: "192.168.0.174:6443"}, +// want: []net.IP{net.IPv4(10, 96, 0, 1), net.IPv4(192, 168, 0, 174)}, +// wantErr: false, +// }, +// } +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// got, err := findAddressFromRemoteCert(tt.args.address) +// if (err != nil) != tt.wantErr { +// t.Errorf("findAddressFromRemoteCert() error = %v, wantErr %v", err, tt.wantErr) +// return +// } +// if !reflect.DeepEqual(got, tt.want) { +// t.Errorf("findAddressFromRemoteCert() = %v, want %v", got, tt.want) +// } +// }) +// } +// } + +// //findAddressFromRemoteCert() = +// //[10.96.0.1 192.168.0.174] +// //[10.96.0.1 192.168.0.174] + +// func Test_findWorkingKubernetesAddress(t *testing.T) { +// type args struct { +// configPath string +// inCluster bool +// } +// tests := []struct { +// name string +// args args +// want *kubernetes.Clientset +// wantErr bool +// }{ +// { +// name: "test", +// args: args{ +// configPath: "/home/dan/super-admin.conf", +// inCluster: false, +// }, +// wantErr: false, +// }, +// } +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// _, err := FindWorkingKubernetesAddress(tt.args.configPath, tt.args.inCluster) +// if (err != nil) != tt.wantErr { +// t.Errorf("findWorkingKubernetesAddress() error = %v, wantErr %v", err, tt.wantErr) +// return +// } + +// }) +// } +// } diff --git a/pkg/kubevip/config_endpoints.go b/pkg/kubevip/config_endpoints.go deleted file mode 100644 index c9717319..00000000 --- 
a/pkg/kubevip/config_endpoints.go +++ /dev/null @@ -1,71 +0,0 @@ -package kubevip - -import ( - "fmt" - "net/url" - "strconv" - - log "github.com/sirupsen/logrus" -) - -func init() { - // Start the index negative as it will be incrememnted of first approach - endPointIndex = -1 -} - -// ValidateBackEndURLS will run through the endpoints and ensure that they're a valid URL -func ValidateBackEndURLS(endpoints *[]BackEnd) error { - - for i := range *endpoints { - log.Debugf("Parsing [%s]", (*endpoints)[i].RawURL) - u, err := url.Parse((*endpoints)[i].RawURL) - if err != nil { - return err - } - - // No error is returned if the prefix/schema is missing - // If the Host is empty then we were unable to parse correctly (could be prefix is missing) - if u.Host == "" { - return fmt.Errorf("Unable to parse [%s], ensure it's prefixed with http(s)://", (*endpoints)[i].RawURL) - } - (*endpoints)[i].Address = u.Hostname() - // if a port is specified then update the internal endpoint stuct, if not rely on the schema - if u.Port() != "" { - portNum, err := strconv.Atoi(u.Port()) - if err != nil { - return err - } - (*endpoints)[i].Port = portNum - } - (*endpoints)[i].ParsedURL = u - } - return nil -} - -// ReturnEndpointAddr - returns an endpoint -func (lb LoadBalancer) ReturnEndpointAddr() (string, error) { - if len(lb.Backends) == 0 { - return "", fmt.Errorf("No Backends configured") - } - if endPointIndex < len(lb.Backends)-1 { - endPointIndex++ - } else { - // reset the index to the beginning - endPointIndex = 0 - } - // TODO - weighting, decision algorythmn - return fmt.Sprintf("%s:%d", lb.Backends[endPointIndex].Address, lb.Backends[endPointIndex].Port), nil -} - -// ReturnEndpointURL - returns an endpoint -func (lb LoadBalancer) ReturnEndpointURL() *url.URL { - - if endPointIndex != len(lb.Backends)-1 { - endPointIndex++ - } else { - // reset the index to the beginning - endPointIndex = 0 - } - // TODO - weighting, decision algorythmn - return lb.Backends[endPointIndex].ParsedURL -} diff --git a/pkg/kubevip/config_environment.go b/pkg/kubevip/config_environment.go new file mode 100644 index 00000000..1f820421 --- /dev/null +++ b/pkg/kubevip/config_environment.go @@ -0,0 +1,554 @@ +package kubevip + +import ( + "encoding/json" + "os" + "strconv" + + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/detector" +) + +// ParseEnvironment - will popultate the configuration from environment variables +func ParseEnvironment(c *Config) error { + if c == nil { + return nil + } + // Ensure that logging is set through the environment variables + env := os.Getenv(vipLogLevel) + // Set default value + if env == "" { + env = "4" + } + + if env != "" { + logLevel, err := strconv.ParseUint(env, 10, 32) + if err != nil { + panic("Unable to parse environment variable [vip_loglevel], should be int") + } + c.Logging = int(logLevel) + } + + // Find interface + env = os.Getenv(vipInterface) + if env != "" { + c.Interface = env + } + + // Find (services) interface + env = os.Getenv(vipServicesInterface) + if env != "" { + c.ServicesInterface = env + } + + // Find provider configuration + env = os.Getenv(providerConfig) + if env != "" { + c.ProviderConfig = env + } + + // Find Kubernetes Leader Election configuration + env = os.Getenv(vipLeaderElection) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableLeaderElection = b + } + + // Attempt to find the Lease name from the environment variables + env = os.Getenv(vipLeaseName) + if env != "" { + 
c.LeaseName = env + } + + // Attempt to find the Lease configuration from the environment variables + env = os.Getenv(vipLeaseDuration) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.LeaseDuration = int(i) + } + + env = os.Getenv(vipRenewDeadline) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RenewDeadline = int(i) + } + + env = os.Getenv(vipRetryPeriod) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RetryPeriod = int(i) + } + + // Attempt to find the Lease annotations from the environment variables + env = os.Getenv(vipLeaseAnnotations) + if env != "" { + err := json.Unmarshal([]byte(env), &c.LeaseAnnotations) + if err != nil { + return err + } + } + + // Find vip address + env = os.Getenv(vipAddress) + if env != "" { + // TODO - parse address net.Host() + c.VIP = env + // } else { + // c.VIP = os.Getenv(address) + } + + // Find address + env = os.Getenv(address) + if env != "" { + // TODO - parse address net.Host() + c.Address = env + } + + // Find vip port + env = os.Getenv(port) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.Port = int(i) + } + + // Find vipDdns + env = os.Getenv(vipDdns) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.DDNS = b + } + + // Find the namespace that the control plane should use (for leaderElection lock) + env = os.Getenv(cpNamespace) + if env != "" { + c.Namespace = env + } + + // Find controlplane toggle + env = os.Getenv(cpEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableControlPlane = b + } + + // Find controlplane toggle + env = os.Getenv(cpDetect) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.DetectControlPlane = b + } + + // Find Services toggle + env = os.Getenv(svcEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServices = b + + // Find Services leader Election + env = os.Getenv(svcElection) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServicesElection = b + } + + // Find load-balancer class only + env = os.Getenv(lbClassOnly) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.LoadBalancerClassOnly = b + } + + // Load-balancer class name + env = os.Getenv(lbClassName) + if env != "" { + c.LoadBalancerClassName = env + } + + // Find the namespace that the control plane should use (for leaderElection lock) + env = os.Getenv(svcNamespace) + if env != "" { + c.ServiceNamespace = env + } + + // Gets the leaseName for services in arp mode + env = os.Getenv(svcLeaseName) + if env != "" { + c.ServicesLeaseName = env + } + } + + // Find vip address cidr range + env = os.Getenv(vipCidr) + if env != "" { + c.VIPCIDR = env + } + + // Find vip address subnet + env = os.Getenv(vipSubnet) + if env != "" { + c.VIPSubnet = env + } + + // Find Single Node + env = os.Getenv(vipSingleNode) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.SingleNode = b + } + + // Find annotation configuration + env = os.Getenv(annotations) + if env != "" { + c.Annotations = env + } + + // Find Start As Leader + // TODO - does this need deprecating? 
+ // Required when the host sets itself as leader before the state change + env = os.Getenv(vipStartLeader) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.StartAsLeader = b + } + + // Find if ARP is enabled + env = os.Getenv(vipArp) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableARP = b + } + + // Find if ARP is enabled + env = os.Getenv(vipArpRate) + if env != "" { + i64, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.ArpBroadcastRate = i64 + } else { + // default to three seconds + c.ArpBroadcastRate = 3000 + } + + // Wireguard Mode + env = os.Getenv(vipWireguard) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableWireguard = b + } + + // Routing Table Mode + env = os.Getenv(vipRoutingTable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableRoutingTable = b + } + + // Routing Table ID + env = os.Getenv(vipRoutingTableID) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RoutingTableID = int(i) + } + + // Routing Table Type + env = os.Getenv(vipRoutingTableType) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.RoutingTableType = int(i) + } + + // DNS mode + env = os.Getenv(dnsMode) + if env != "" { + c.DNSMode = env + } + + // Disable updates for services (status.LoadBalancer.Ingress will not be updated) + env = os.Getenv(disableServiceUpdates) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.DisableServiceUpdates = b + } + + // BGP Server options + env = os.Getenv(bgpEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableBGP = b + } + + // BGP Router interface determines an interface that we can use to find an address for + env = os.Getenv(bgpRouterInterface) + if env != "" { + _, address, err := detector.FindIPAddress(env) + if err != nil { + return err + } + c.BGPConfig.RouterID = address + } + + // RouterID + env = os.Getenv(bgpRouterID) + if env != "" { + c.BGPConfig.RouterID = env + } + + // AS + env = os.Getenv(bgpRouterAS) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPConfig.AS = uint32(u64) + } + + // Peer AS + env = os.Getenv(bgpPeerAS) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPPeerConfig.AS = uint32(u64) + } + + // Peer AS + env = os.Getenv(bgpPeers) + if env != "" { + peers, err := bgp.ParseBGPPeerConfig(env) + if err != nil { + return err + } + c.BGPConfig.Peers = peers + } + + // BGP Peer mutlihop + env = os.Getenv(bgpMultiHop) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.BGPPeerConfig.MultiHop = b + } + + // BGP Peer password + env = os.Getenv(bgpPeerPassword) + if env != "" { + c.BGPPeerConfig.Password = env + } + + // BGP Source Interface + env = os.Getenv(bgpSourceIF) + if env != "" { + c.BGPConfig.SourceIF = env + } + + // BGP Source Address + env = os.Getenv(bgpSourceIP) + if env != "" { + c.BGPConfig.SourceIP = env + } + + // BGP Peer options, add them if relevant + env = os.Getenv(bgpPeerAddress) + if env != "" { + c.BGPPeerConfig.Address = env + // If we've added in a peer configuration, then we should add it to the BGP configuration + c.BGPConfig.Peers = append(c.BGPConfig.Peers, c.BGPPeerConfig) + } + + 
// BGP Timers options + env = os.Getenv(bgpHoldTime) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPConfig.HoldTime = u64 + } + env = os.Getenv(bgpKeepaliveInterval) + if env != "" { + u64, err := strconv.ParseUint(env, 10, 32) + if err != nil { + return err + } + c.BGPConfig.KeepaliveInterval = u64 + } + + // Enable the Equinix Metal API calls + env = os.Getenv(vipPacket) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableMetal = b + } + + // Find the Equinix Metal project name + env = os.Getenv(vipPacketProject) + if env != "" { + // TODO - parse address net.Host() + c.MetalProject = env + } + + // Find the Equinix Metal project ID + env = os.Getenv(vipPacketProjectID) + if env != "" { + // TODO - parse address net.Host() + c.MetalProjectID = env + } + + // Enable the load-balancer + env = os.Getenv(lbEnable) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableLoadBalancer = b + } + + // Find loadbalancer port + env = os.Getenv(lbPort) + if env != "" { + i, err := strconv.ParseInt(env, 10, 32) + if err != nil { + return err + } + c.LoadBalancerPort = int(i) + } + + // Find loadbalancer forwarding method + env = os.Getenv(lbForwardingMethod) + if env != "" { + c.LoadBalancerForwardingMethod = env + } + + env = os.Getenv(EnableServiceSecurity) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableServiceSecurity = b + } + + // Find if node labeling is enabled + env = os.Getenv(EnableNodeLabeling) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableNodeLabeling = b + } + + // Find Prometheus configuration + env = os.Getenv(prometheusServer) + if env != "" { + c.PrometheusHTTPServer = env + } + + // Set Egress configuration(s) + env = os.Getenv(egressPodCidr) + if env != "" { + c.EgressPodCidr = env + } + + env = os.Getenv(egressServiceCidr) + if env != "" { + c.EgressServiceCidr = env + } + + // if this is set then we're enabling nftables + env = os.Getenv(egressWithNftables) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EgressWithNftables = b + } + + // check to see if we're using a specific path to the Kubernetes config file + env = os.Getenv(k8sConfigFile) + if env != "" { + c.K8sConfigFile = env + } + + env = os.Getenv(enableEndpointSlices) + if env != "" { + b, err := strconv.ParseBool(env) + if err != nil { + return err + } + c.EnableEndpointSlices = b + } + + return nil +} diff --git a/pkg/kubevip/config_envvar.go b/pkg/kubevip/config_envvar.go new file mode 100644 index 00000000..aa3de266 --- /dev/null +++ b/pkg/kubevip/config_envvar.go @@ -0,0 +1,198 @@ +package kubevip + +// Environment variables +const ( + + // vipArp - defines if the arp broadcast should be enabled + vipArp = "vip_arp" + + // vip_arpRate - defines the rate of gARP broadcasts + vipArpRate = "vip_arpRate" + + // vipLeaderElection - defines if the kubernetes algorithm should be used + vipLeaderElection = "vip_leaderelection" + + // vipLeaseName - defines the name of the lease lock + vipLeaseName = "vip_leasename" + + // vipLeaderElection - defines if the kubernetes algorithm should be used + vipLeaseDuration = "vip_leaseduration" + + // vipLeaderElection - defines if the kubernetes algorithm should be used + vipRenewDeadline = "vip_renewdeadline" + + // vipLeaderElection - defines if the kubernetes algorithm should be used + vipRetryPeriod 
= "vip_retryperiod" + + // vipLeaderElection - defines the annotations given to the lease lock + vipLeaseAnnotations = "vip_leaseannotations" + + // vipLogLevel - defines the level of logging to produce (5 being the most verbose) + vipLogLevel = "vip_loglevel" + + // vipInterface - defines the interface that the vip should bind too + vipInterface = "vip_interface" + + // vipServicesInterface - defines the interface that the service vips should bind too + vipServicesInterface = "vip_servicesinterface" + + // vipCidr - defines the cidr that the vip will use (for BGP) + vipCidr = "vip_cidr" + + // vipSubnet - defines the subnet that the vip will use + vipSubnet = "vip_subnet" + + // egressPodCidr - defines the cidr that egress will ignore + egressPodCidr = "egress_podcidr" + + // egressServiceCidr - defines the cidr that egress will ignore + egressServiceCidr = "egress_servicecidr" + + // egressWithNftables - enables using nftables over iptables + egressWithNftables = "egress_withnftables" + + ///////////////////////////////////// + // TO DO: + // Determine how to tidy this mess up + ///////////////////////////////////// + + // vipAddress - defines the address that the vip will expose + // DEPRECATED: will be removed in a next release + vipAddress = "vip_address" + + // address - defines the address that would be used as a vip + // it may be an IP or a DNS name, in case of a DNS name + // kube-vip will try to resolve it and use the IP as a VIP + address = "address" + + // port - defines the port for the VIP + port = "port" + + // annotations + annotations = "annotation" + + // vipDdns - defines if use dynamic dns to allocate IP for "address" + vipDdns = "vip_ddns" + + // vipSingleNode - defines the vip start as a single node cluster + vipSingleNode = "vip_singlenode" + + // vipStartLeader - will start this instance as the leader of the cluster + vipStartLeader = "vip_startleader" + + // vipPacket defines that the packet API will be used for EIP + vipPacket = "vip_packet" + + // vipPacketProject defines which project within Packet to use + vipPacketProject = "vip_packetproject" + + // vipPacketProjectID defines which projectID within Packet to use + vipPacketProjectID = "vip_packetprojectid" + + // providerConfig defines a path to a configuration that should be parsed + providerConfig = "provider_config" + + // bgpEnable defines if BGP should be enabled + bgpEnable = "bgp_enable" + // bgpRouterID defines the routerID for the BGP server + bgpRouterID = "bgp_routerid" + // bgpRouterInterface defines the interface that we can find the address for + bgpRouterInterface = "bgp_routerinterface" + // bgpRouterAS defines the AS for the BGP server + bgpRouterAS = "bgp_as" + // bgpPeerAddress defines the address for a BGP peer + bgpPeerAddress = "bgp_peeraddress" + // bgpPeers defines the address for a BGP peer + bgpPeers = "bgp_peers" + // bgpPeerAS defines the AS for a BGP peer + bgpPeerAS = "bgp_peeras" + // bgpPeerAS defines the AS for a BGP peer + bgpPeerPassword = "bgp_peerpass" // nolint + // bgpMultiHop enables mulithop routing + bgpMultiHop = "bgp_multihop" + // bgpSourceIF defines the source interface for BGP peering + bgpSourceIF = "bgp_sourceif" + // bgpSourceIP defines the source address for BGP peering + bgpSourceIP = "bgp_sourceip" + // bgpHoldTime defines bgp timers hold time + bgpHoldTime = "bgp_hold_time" + // bgpKeepaliveInterval defines bgp timers keepalive interval + bgpKeepaliveInterval = "bgp_keepalive_interval" + + // vipWireguard - defines if wireguard will be used for vips + 
vipWireguard = "vip_wireguard" //nolint + + // vipRoutingTable - defines if table mode will be used for vips + vipRoutingTable = "vip_routingtable" //nolint + + // vipRoutingTableID - defines which table mode will be used for vips + vipRoutingTableID = "vip_routingtableid" //nolint + + // vipRoutingTableType - defines which table type will be used for vip routes + // valid values for this variable can be found in: + // https://pkg.go.dev/golang.org/x/sys/unix#RTN_UNSPEC + // Note that route type have the prefix `RTN_`, and you + // specify the integer value, not the name. For example: + // you should say `vip_routingtabletype=2` for RTN_LOCAL + vipRoutingTableType = "vip_routingtabletype" //nolint + + // cpNamespace defines the namespace the control plane pods will run in + cpNamespace = "cp_namespace" + + // cpEnable enables the control plane feature + cpEnable = "cp_enable" + + // cpDetect will attempt to automatically find a working address for the control plane from loopback + cpDetect = "cp_detect" + + // svcEnable enables the Kubernetes service feature + svcEnable = "svc_enable" + + // svcNamespace defines the namespace the service pods will run in + svcNamespace = "svc_namespace" + + // svcElection enables election per Kubernetes service + svcElection = "svc_election" + + // svcLeaseName Name of the lease that is used for leader election for services (in arp mode) + svcLeaseName = "svc_leasename" + + // lbClassOnly enables load-balancer for class "kube-vip.io/kube-vip-class" only + lbClassOnly = "lb_class_only" + + // lbClassName enables load-balancer for a specific class only + lbClassName = "lb_class_name" + + // lbEnable defines if the load-balancer should be enabled + lbEnable = "lb_enable" + + // lbPort defines the port of load-balancer + lbPort = "lb_port" + + // lbForwardingMethod defines the forwarding method of load-balancer + lbForwardingMethod = "lb_fwdmethod" + + // EnableServiceSecurity defines if the load-balancer should only allow traffic to service ports + EnableServiceSecurity = "enable_service_security" + + // EnableNodeLabeling, will enable node labeling as the node becomes leader + EnableNodeLabeling = "enable_node_labeling" + + // prometheusServer defines the address prometheus listens on + prometheusServer = "prometheus_server" + + // vipConfigMap defines the configmap that kube-vip will watch for service definitions + // vipConfigMap = "vip_configmap" + + //k8sConfigFile defines the path to the configfile used to speak with the API server + k8sConfigFile = "k8s_config_file" + + // dnsMode defines mode that DNS lookup will be performed with (first, ipv4, ipv6, dual) + dnsMode = "dns_mode" + + // disableServiceUpdates disables service updating + disableServiceUpdates = "disable_service_updates" + + // enableEndpointSlices enables use of EndpointSlices instead of Endpoints + enableEndpointSlices = "enable_endpointslices" +) diff --git a/pkg/kubevip/config_generator.go b/pkg/kubevip/config_generator.go index 7a075286..3d6e5373 100644 --- a/pkg/kubevip/config_generator.go +++ b/pkg/kubevip/config_generator.go @@ -2,424 +2,252 @@ package kubevip import ( "fmt" - "os" "strconv" - "strings" - "github.com/ghodss/yaml" appv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Environment variables -const ( - - //vipArp - defines if the arp broadcast should be enabled - vipArp = "vip_arp" - - //vipLeaderElection - defines if the kubernetes algorithim should be used - vipLeaderElection = "vip_leaderelection" - - 
//vipLeaderElection - defines if the kubernetes algorithim should be used - vipLeaseDuration = "vip_leaseduration" - - //vipLeaderElection - defines if the kubernetes algorithim should be used - vipRenewDeadline = "vip_renewdeadline" - - //vipLeaderElection - defines if the kubernetes algorithim should be used - vipRetryPeriod = "vip_retryperiod" - - //vipLogLevel - defines the level of logging to produce (5 being the most verbose) - vipLogLevel = "vip_loglevel" - - //vipInterface - defines the interface that the vip should bind too - vipInterface = "vip_interface" - - //vipAddress - defines the address that the vip will expose - // DEPRECATED: will be removed in a next release - vipAddress = "vip_address" - - //vipCidr - defines the cidr that the vip will use - vipCidr = "vip_cidr" - - //address - defines the address that would be used as a vip - // it may be an IP or a DNS name, in case of a DNS name - // kube-vip will try to resolve it and use the IP as a VIP - address = "address" - - //vipSingleNode - defines the vip start as a single node cluster - vipSingleNode = "vip_singlenode" - - //vipStartLeader - will start this instance as the leader of the cluster - vipStartLeader = "vip_startleader" - - //vipPeers defines the configuration of raft peer(s) - vipPeers = "vip_peers" - - //vipLocalPeer defines the configuration of the local raft peer - vipLocalPeer = "vip_localpeer" - - //vipRemotePeers defines the configuration of the local raft peer - vipRemotePeers = "vip_remotepeers" - - //vipAddPeersToLB defines that RAFT peers should be added to the load-balancer - vipAddPeersToLB = "vip_addpeerstolb" + applyCoreV1 "k8s.io/client-go/applyconfigurations/core/v1" + applyMetaV1 "k8s.io/client-go/applyconfigurations/meta/v1" + applyRbacV1 "k8s.io/client-go/applyconfigurations/rbac/v1" - //vipPacket defines that the packet API will be used tor EIP - vipPacket = "vip_packet" - - //vipPacket defines which project within Packet to use - vipPacketProject = "vip_packetproject" - - //bgpEnable defines if BGP should be enabled - bgpEnable = "bgp_enable" - //bgpRouterID defines the routerID for the BGP server - bgpRouterID = "bgp_routerid" - //bgpRouterAS defines the AS for the BGP server - bgpRouterAS = "bgp_as" - //bgpPeerAddress defines the address for a BGP peer - bgpPeerAddress = "bgp_peeraddress" - //bgpPeerAS defines the AS for a BGP peer - bgpPeerAS = "bgp_peeras" - - //lbEnable defines if the load-balancer should be enabled - lbEnable = "lb_enable" - - //lbBindToVip defines if the load-balancer should bind ONLY to the virtual IP - lbBindToVip = "lb_bindtovip" - - //lbName defines the name of load-balancer - lbName = "lb_name" - - //lbType defines the type of load-balancer - lbType = "lb_type" - - //lbPort defines the port of load-balancer - lbPort = "lb_port" - - //lbBackendPort defines a port that ALL backends are using - lbBackendPort = "lb_backendport" - - //lbBackends defines the backends of load-balancer - lbBackends = "lb_backends" - - //vipConfigMap defines the configmap that kube-vip will watch for service definitions - vipConfigMap = "vip_configmap" + "sigs.k8s.io/yaml" ) -// ParseEnvironment - will popultate the configuration from environment variables -func ParseEnvironment(c *Config) error { - - // Find interface - env := os.Getenv(vipInterface) - if env != "" { - c.Interface = env +// GenerateSA will create the service account for kube-vip +func GenerateSA() *applyCoreV1.ServiceAccountApplyConfiguration { + kind := "ServiceAccount" + name := "kube-vip" + namespace := "kube-system" 
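+	// The apply-configuration types use pointer fields, which is why kind,
+	// name and namespace are declared as locals above and taken by address below.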
+ newManifest := &applyCoreV1.ServiceAccountApplyConfiguration{ + TypeMetaApplyConfiguration: applyMetaV1.TypeMetaApplyConfiguration{APIVersion: &corev1.SchemeGroupVersion.Version, Kind: &kind}, + ObjectMetaApplyConfiguration: &applyMetaV1.ObjectMetaApplyConfiguration{ + Name: &name, + Namespace: &namespace, + }, } + return newManifest +} - // Find Kubernetes Leader Election configuration - - env = os.Getenv(vipLeaderElection) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.EnableLeaderElection = b - } +// GenerateCR will generate the Cluster role for kube-vip +func GenerateCR() *applyRbacV1.ClusterRoleApplyConfiguration { + name := "system:kube-vip-role" + roleRefKind := "ClusterRole" + apiVersion := "rbac.authorization.k8s.io/v1" - // Attempt to find the Lease configuration from teh environment variables - env = os.Getenv(vipLeaseDuration) - if env != "" { - i, err := strconv.ParseInt(env, 8, 0) - if err != nil { - return err - } - c.LeaseDuration = int(i) + newManifest := &applyRbacV1.ClusterRoleApplyConfiguration{ + TypeMetaApplyConfiguration: applyMetaV1.TypeMetaApplyConfiguration{APIVersion: &apiVersion, Kind: &roleRefKind}, + ObjectMetaApplyConfiguration: &applyMetaV1.ObjectMetaApplyConfiguration{ + Name: &name, + }, + Rules: []applyRbacV1.PolicyRuleApplyConfiguration{ + { + APIGroups: []string{""}, + Resources: []string{"services/status"}, + Verbs: []string{"update"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"services", "endpoints"}, + Verbs: []string{"list", "get", "watch", "endoints"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"list", "get", "watch", "update", "patch"}, + }, + { + APIGroups: []string{"coordination.k8s.io"}, + Resources: []string{"leases"}, + Verbs: []string{"list", "get", "watch", "update", "create"}, + }, + }, } + return newManifest +} - env = os.Getenv(vipRenewDeadline) - if env != "" { - i, err := strconv.ParseInt(env, 8, 0) - if err != nil { - return err - } - c.RenewDeadline = int(i) +// GenerateCRB will generate the clusterRoleBinding +func GenerateCRB() *applyRbacV1.ClusterRoleBindingApplyConfiguration { + kind := "ClusterRoleBinding" + apiVersion := "rbac.authorization.k8s.io/v1" + subjectKind := "ServiceAccount" + apiGroup := "rbac.authorization.k8s.io" + roleRefKind := "ClusterRole" + roleRefName := "system:kube-vip-role" + name := "kube-vip" + bindName := "system:kube-vip-role-binding" + namespace := "kube-system" + + newManifest := &applyRbacV1.ClusterRoleBindingApplyConfiguration{ + TypeMetaApplyConfiguration: applyMetaV1.TypeMetaApplyConfiguration{APIVersion: &apiVersion, Kind: &kind}, + ObjectMetaApplyConfiguration: &applyMetaV1.ObjectMetaApplyConfiguration{ + Name: &bindName, + }, + RoleRef: &applyRbacV1.RoleRefApplyConfiguration{ + APIGroup: &apiGroup, + Kind: &roleRefKind, + Name: &roleRefName, + }, + Subjects: []applyRbacV1.SubjectApplyConfiguration{ + { + Kind: &subjectKind, + Name: &name, + Namespace: &namespace, + }, + }, } + return newManifest +} - env = os.Getenv(vipRetryPeriod) - if env != "" { - i, err := strconv.ParseInt(env, 8, 0) - if err != nil { - return err - } - c.RetryPeriod = int(i) - } +// generatePodSpec will take a kube-vip config and generate a Pod spec +func generatePodSpec(c *Config, imageVersion string, inCluster bool) *corev1.Pod { + command := "manager" - // Find vip address - env = os.Getenv(vipAddress) - if env != "" { - // TODO - parse address net.Host() - c.VIP = env + // Determine where the pods should be living 
(for multi-tenancy) + var namespace string + if c.ServiceNamespace != "" { + namespace = c.ServiceNamespace } else { - c.Address = os.Getenv(address) - } - - // Find vip address cidr range - env = os.Getenv(vipCidr) - if env != "" { - // TODO - parse address net.Host() - c.VIPCIDR = env - } - - // Find Single Node - env = os.Getenv(vipSingleNode) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.SingleNode = b - } - - // Find Start As Leader - // TODO - does this need depricating? - // Required when the host sets itself as leader before the state change - env = os.Getenv(vipStartLeader) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.StartAsLeader = b - } - - // Find ARP - env = os.Getenv(vipArp) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err - } - c.GratuitousARP = b + namespace = metav1.NamespaceSystem } - //Removal of seperate peer - env = os.Getenv(vipLocalPeer) - if env != "" { - // Parse the string in format :
: - peer, err := ParsePeerConfig(env) - if err != nil { - return err - } - c.LocalPeer = *peer - } - - env = os.Getenv(vipPeers) - if env != "" { - // TODO - perhaps make this optional? - // Remove existing peers - c.RemotePeers = []RaftPeer{} - - // Parse the remote peers (comma seperated) - s := strings.Split(env, ",") - if len(s) == 0 { - return fmt.Errorf("The Remote Peer List [%s] is unable to be parsed, should be in comma seperated format :
:", env) - } - for x := range s { - // Parse the each remote peer string in format :
: - peer, err := ParsePeerConfig(s[x]) - if err != nil { - return err - } - - c.RemotePeers = append(c.RemotePeers, *peer) - - } + // build environment variables + newEnvironment := []corev1.EnvVar{ + { + Name: vipArp, + Value: strconv.FormatBool(c.EnableARP), + }, + { + Name: port, + Value: fmt.Sprintf("%d", c.Port), + }, } - // Find Add Peers as Backends - env = os.Getenv(vipAddPeersToLB) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err + // If we're specifically saying which interface to use then add it to the manifest + if c.Interface != "" { + iface := []corev1.EnvVar{ + { + Name: vipInterface, + Value: c.Interface, + }, } - c.AddPeersAsBackends = b + newEnvironment = append(newEnvironment, iface...) } - // BGP Server options - env = os.Getenv(bgpEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err + // Detect if we should be using a separate interface for sercices + if c.ServicesInterface != "" { + // build environment variables + svcInterface := []corev1.EnvVar{ + { + Name: vipServicesInterface, + Value: c.ServicesInterface, + }, } - c.EnableBGP = b + newEnvironment = append(newEnvironment, svcInterface...) } - // RouterID - env = os.Getenv(bgpRouterID) - if env != "" { - c.BGPConfig.RouterID = env - } - // AS - env = os.Getenv(bgpRouterAS) - if env != "" { - u64, err := strconv.ParseUint(env, 10, 32) - if err != nil { - return err + // If a CIDR is used add it to the manifest + if c.VIPCIDR != "" { + // build environment variables + cidr := []corev1.EnvVar{ + { + Name: vipCidr, + Value: c.VIPCIDR, + }, } - c.BGPConfig.AS = uint32(u64) + newEnvironment = append(newEnvironment, cidr...) } - // BGP Peer options - env = os.Getenv(bgpPeerAddress) - if env != "" { - c.BGPPeerConfig.Address = env - } - // Peer AS - env = os.Getenv(bgpPeerAS) - if env != "" { - u64, err := strconv.ParseUint(env, 10, 32) - if err != nil { - return err + // If a subnet is required for the VIP + if c.VIPSubnet != "" { + // build environment variables + cidr := []corev1.EnvVar{ + { + Name: vipSubnet, + Value: c.VIPSubnet, + }, } - c.BGPPeerConfig.AS = uint32(u64) + newEnvironment = append(newEnvironment, cidr...) } - // Enable the Packet API calls - env = os.Getenv(vipPacket) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err + if c.DNSMode != "" { + // build environment variables + dnsModeSelector := []corev1.EnvVar{ + { + Name: dnsMode, + Value: c.DNSMode, + }, } - c.EnablePacket = b + newEnvironment = append(newEnvironment, dnsModeSelector...) 
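+		// Each optional setting above follows the same pattern: the environment
+		// variable is only appended when the corresponding Config field is set,
+		// so the generated manifest does not carry empty values.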
} - // Find the Packet project name - env = os.Getenv(vipPacketProject) - if env != "" { - // TODO - parse address net.Host() - c.PacketProject = env - } - - // Enable the load-balancer - env = os.Getenv(lbEnable) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err + // If we're doing the hybrid mode + if c.EnableControlPlane { + cp := []corev1.EnvVar{ + { + Name: cpEnable, + Value: strconv.FormatBool(c.EnableControlPlane), + }, + { + Name: cpNamespace, + Value: c.Namespace, + }, } - c.EnableLoadBalancer = b - } - - // Load Balancer configuration - return parseEnvironmentLoadBalancer(c) -} - -func parseEnvironmentLoadBalancer(c *Config) error { - // Check if an existing load-balancer configuration already exists - if len(c.LoadBalancers) == 0 { - c.LoadBalancers = append(c.LoadBalancers, LoadBalancer{}) - } - - // Find LoadBalancer Port - env := os.Getenv(lbPort) - if env != "" { - i, err := strconv.ParseInt(env, 8, 0) - if err != nil { - return err + if c.DDNS { + cp = append(cp, corev1.EnvVar{ + Name: vipDdns, + Value: strconv.FormatBool(c.DDNS), + }) } - c.LoadBalancers[0].Port = int(i) - } - - // Find Type of LoadBalancer - env = os.Getenv(lbType) - if env != "" { - c.LoadBalancers[0].Type = env - } - - // Find Type of LoadBalancer Name - env = os.Getenv(lbName) - if env != "" { - c.LoadBalancers[0].Name = env - } - - // Find If LB should bind to Vip - env = os.Getenv(lbBindToVip) - if env != "" { - b, err := strconv.ParseBool(env) - if err != nil { - return err + if c.DetectControlPlane { + cp = append(cp, corev1.EnvVar{ + Name: cpDetect, + Value: strconv.FormatBool(c.DetectControlPlane), + }) } - c.LoadBalancers[0].BindToVip = b + newEnvironment = append(newEnvironment, cp...) } - // Find global backendport - env = os.Getenv(lbBackendPort) - if env != "" { - i, err := strconv.ParseInt(env, 8, 0) - if err != nil { - return err + // If we're doing the hybrid mode + if c.EnableServices { + svc := []corev1.EnvVar{ + { + Name: svcEnable, + Value: strconv.FormatBool(c.EnableServices), + }, + { + Name: svcLeaseName, + Value: c.ServicesLeaseName, + }, } - c.LoadBalancers[0].BackendPort = int(i) - } - - // Parse backends - env = os.Getenv(lbBackends) - if env != "" { - // TODO - perhaps make this optional? - // Remove existing backends - c.LoadBalancers[0].Backends = []BackEnd{} - - // Parse the remote peers (comma seperated) - s := strings.Split(env, ",") - if len(s) == 0 { - return fmt.Errorf("The Backends List [%s] is unable to be parsed, should be in comma seperated format
:", env) + newEnvironment = append(newEnvironment, svc...) + if c.EnableServicesElection { + svcElection := []corev1.EnvVar{ + { + Name: svcElection, + Value: strconv.FormatBool(c.EnableServicesElection), + }, + } + newEnvironment = append(newEnvironment, svcElection...) } - for x := range s { - // Parse the each remote peer string in format
: - - be, err := ParseBackendConfig(s[x]) - if err != nil { - return err + if c.LoadBalancerClassOnly { + lbClassOnlyVar := []corev1.EnvVar{ + { + Name: lbClassOnly, + Value: strconv.FormatBool(c.LoadBalancerClassOnly), + }, } - - c.LoadBalancers[0].Backends = append(c.LoadBalancers[0].Backends, *be) - + newEnvironment = append(newEnvironment, lbClassOnlyVar...) } - } - return nil -} - -// generatePodSpec will take a kube-vip config and generate a Pod spec -func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { - - // build environment variables - newEnvironment := []corev1.EnvVar{ - { - Name: vipArp, - Value: strconv.FormatBool(c.GratuitousARP), - }, - { - Name: vipInterface, - Value: c.Interface, - }, - } - - // If a CIDR is used add it to the manifest - if c.VIPCIDR != "" { - // build environment variables - cidr := []corev1.EnvVar{ - { - Name: vipCidr, - Value: c.VIPCIDR, - }, + if c.EnableServiceSecurity { + EnableServiceSecurityVar := []corev1.EnvVar{ + { + Name: EnableServiceSecurity, + Value: strconv.FormatBool(c.EnableServiceSecurity), + }, + } + newEnvironment = append(newEnvironment, EnableServiceSecurityVar...) } - newEnvironment = append(newEnvironment, cidr...) - } // If Leader election is enabled then add the configuration to the manifest @@ -430,6 +258,10 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { Name: vipLeaderElection, Value: strconv.FormatBool(c.EnableLeaderElection), }, + { + Name: vipLeaseName, + Value: c.LeaseName, + }, { Name: vipLeaseDuration, Value: fmt.Sprintf("%d", c.LeaseDuration), @@ -459,52 +291,100 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { } newEnvironment = append(newEnvironment, leaderElection...) - } else { - // Generate Raft configuration - raft := []corev1.EnvVar{ + } + + // If we're enabling node labeling on leader election + if c.EnableNodeLabeling { + EnableNodeLabeling := []corev1.EnvVar{ { - Name: vipStartLeader, - Value: strconv.FormatBool(c.StartAsLeader), + Name: EnableNodeLabeling, + Value: strconv.FormatBool(c.EnableNodeLabeling), }, + } + newEnvironment = append(newEnvironment, EnableNodeLabeling...) + } + + // If we're specifying an annotation configuration + if c.Annotations != "" { + annotations := []corev1.EnvVar{ { - Name: vipAddPeersToLB, - Value: strconv.FormatBool(c.AddPeersAsBackends), + Name: annotations, + Value: c.Annotations, }, + } + newEnvironment = append(newEnvironment, annotations...) + + } + + // If we're specifying a configuration + if c.ProviderConfig != "" { + provider := []corev1.EnvVar{ { - Name: vipLocalPeer, - Value: fmt.Sprintf("%s:%s:%d", c.LocalPeer.ID, c.LocalPeer.Address, c.LocalPeer.Port), + Name: providerConfig, + Value: c.ProviderConfig, }, } - newEnvironment = append(newEnvironment, raft...) + newEnvironment = append(newEnvironment, provider...) } - // If Packet is enabled then add it to the manifest - if c.EnablePacket { + // If Equinix Metal is enabled then add it to the manifest + if c.EnableMetal { packet := []corev1.EnvVar{ { Name: vipPacket, - Value: strconv.FormatBool(c.EnablePacket), + Value: strconv.FormatBool(c.EnableMetal), }, { Name: vipPacketProject, - Value: c.PacketProject, + Value: c.MetalProject, + }, + { + Name: vipPacketProjectID, + Value: c.MetalProjectID, }, { Name: "PACKET_AUTH_TOKEN", - Value: c.PacketAPIKey, + Value: c.MetalAPIKey, }, } newEnvironment = append(newEnvironment, packet...) 
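+		// Note: the environment variable names keep the legacy vip_packet*
+		// prefix even though the Config fields are now named Metal*, which
+		// appears to keep existing manifests working after the rename.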
+ } + // Detect and enable wireguard mode + if c.EnableWireguard { + wireguard := []corev1.EnvVar{ + { + Name: vipWireguard, + Value: strconv.FormatBool(c.EnableWireguard), + }, + } + newEnvironment = append(newEnvironment, wireguard...) + } + + // Detect and enable routing table mode + if c.EnableRoutingTable { + routingtable := []corev1.EnvVar{ + { + Name: vipRoutingTable, + Value: strconv.FormatBool(c.EnableRoutingTable), + }, + } + newEnvironment = append(newEnvironment, routingtable...) } - // If BGP is enabled then add it to the manifest + // If BGP, but we're not using Equinix Metal if c.EnableBGP { bgp := []corev1.EnvVar{ { Name: bgpEnable, Value: strconv.FormatBool(c.EnableBGP), }, + } + newEnvironment = append(newEnvironment, bgp...) + } + // If BGP, but we're not using Equinix Metal + if c.EnableBGP && !c.EnableMetal { + bgpConfig := []corev1.EnvVar{ { Name: bgpRouterID, Value: c.BGPConfig.RouterID, @@ -517,12 +397,51 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { Name: bgpPeerAddress, Value: c.BGPPeerConfig.Address, }, + { + Name: bgpPeerPassword, + Value: c.BGPPeerConfig.Password, + }, { Name: bgpPeerAS, Value: fmt.Sprintf("%d", c.BGPPeerConfig.AS), }, } - newEnvironment = append(newEnvironment, bgp...) + + // Detect if we should be using a source interface for speaking to a bgp peer + if c.BGPConfig.SourceIF != "" { + bgpConfig = append(bgpConfig, corev1.EnvVar{ + Name: bgpSourceIF, + Value: c.BGPConfig.SourceIF, + }, + ) + } + // Detect if we should be using a source address for speaking to a bgp peer + + if c.BGPConfig.SourceIP != "" { + bgpConfig = append(bgpConfig, corev1.EnvVar{ + Name: bgpSourceIP, + Value: c.BGPConfig.SourceIP, + }, + ) + } + + var peers string + if len(c.BGPPeers) != 0 { + for x := range c.BGPPeers { + if x != 0 { + peers = fmt.Sprintf("%s,%s", peers, c.BGPPeers[x]) + } else { + peers = c.BGPPeers[x] + } + } + bgpConfig = append(bgpConfig, corev1.EnvVar{ + Name: bgpPeers, + Value: peers, + }, + ) + } + + newEnvironment = append(newEnvironment, bgpConfig...) } @@ -534,20 +453,12 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { Value: strconv.FormatBool(c.EnableLoadBalancer), }, { - Name: lbBackendPort, - Value: fmt.Sprintf("%d", c.LoadBalancers[0].Port), - }, - { - Name: lbName, - Value: c.LoadBalancers[0].Name, - }, - { - Name: lbType, - Value: c.LoadBalancers[0].Type, + Name: lbPort, + Value: fmt.Sprintf("%d", c.LoadBalancerPort), }, { - Name: lbBindToVip, - Value: strconv.FormatBool(c.LoadBalancers[0].BindToVip), + Name: lbForwardingMethod, + Value: c.LoadBalancerForwardingMethod, }, } @@ -566,25 +477,32 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { }) } - // Parse peers into a comma seperated string - if len(c.RemotePeers) != 0 { - var peers string - for x := range c.RemotePeers { - if x != 0 { - peers = fmt.Sprintf("%s,%s:%s:%d", peers, c.RemotePeers[x].ID, c.RemotePeers[x].Address, c.RemotePeers[x].Port) + if c.PrometheusHTTPServer != "" { + prometheus := []corev1.EnvVar{ + { + Name: prometheusServer, + Value: c.PrometheusHTTPServer, + }, + } + newEnvironment = append(newEnvironment, prometheus...) 
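The environment variables assembled here are the same ones ParseEnvironment reads back when the kube-vip pod starts, so the generator and the parser have to stay in sync. A minimal, hypothetical sketch of that round trip (only variables shown in this diff are set; the rest are assumed unset):

```go
package main

import (
	"fmt"
	"os"

	"github.com/kube-vip/kube-vip/pkg/kubevip"
)

func main() {
	// Env var names come from config_envvar.go; the values here are made up.
	os.Setenv("lb_enable", "true")
	os.Setenv("lb_port", "6443")
	os.Setenv("lb_fwdmethod", "local")
	os.Setenv("bgp_hold_time", "15")
	os.Setenv("bgp_keepalive_interval", "5")
	os.Setenv("prometheus_server", ":2112")

	c := &kubevip.Config{}
	if err := kubevip.ParseEnvironment(c); err != nil {
		panic(err)
	}
	// Fields populated by the parser shown earlier in this change.
	fmt.Println(c.EnableLoadBalancer, c.LoadBalancerPort, c.LoadBalancerForwardingMethod,
		c.BGPConfig.HoldTime, c.BGPConfig.KeepaliveInterval, c.PrometheusHTTPServer)
}
```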
+ } - } else { - peers = fmt.Sprintf("%s:%s:%d", c.RemotePeers[x].ID, c.RemotePeers[x].Address, c.RemotePeers[x].Port) + if c.EnableEndpointSlices { + newEnvironment = append(newEnvironment, corev1.EnvVar{ + Name: enableEndpointSlices, + Value: strconv.FormatBool(c.EnableEndpointSlices), + }) + } - } - //peers = fmt.Sprintf("%s,%s:%s:%d", peers, c.RemotePeers[x].ID, c.RemotePeers[x].Address, c.RemotePeers[x].Port) - //fmt.Sprintf("", peers) - } - peerEnvirontment := corev1.EnvVar{ - Name: vipPeers, - Value: peers, + if c.DisableServiceUpdates { + // Disable service updates + disServiceUpdates := []corev1.EnvVar{ + { + Name: disableServiceUpdates, + Value: strconv.FormatBool(c.DisableServiceUpdates), + }, } - newEnvironment = append(newEnvironment, peerEnvirontment) + newEnvironment = append(newEnvironment, disServiceUpdates...) } newManifest := &corev1.Pod{ @@ -594,75 +512,101 @@ func generatePodSpec(c *Config, imageVersion string) *corev1.Pod { }, ObjectMeta: metav1.ObjectMeta{ Name: "kube-vip", - Namespace: "kube-system", + Namespace: namespace, }, Spec: corev1.PodSpec{ Containers: []corev1.Container{ { Name: "kube-vip", - Image: fmt.Sprintf("plndr/kube-vip:%s", imageVersion), + Image: fmt.Sprintf("ghcr.io/kube-vip/kube-vip:%s", imageVersion), ImagePullPolicy: corev1.PullAlways, SecurityContext: &corev1.SecurityContext{ Capabilities: &corev1.Capabilities{ Add: []corev1.Capability{ "NET_ADMIN", - "SYS_TIME", + "NET_RAW", }, }, }, Args: []string{ - "start", + command, }, Env: newEnvironment, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "kubeconfig", - MountPath: "/etc/kubernetes/admin.conf", - }, - { - Name: "ca-certs", - MountPath: "/etc/ssl/certs", - ReadOnly: true, - }, - }, }, }, - Volumes: []corev1.Volume{ - { - Name: "kubeconfig", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/etc/kubernetes/admin.conf", - }, - }, + HostNetwork: true, + }, + } + + if inCluster { + // If we're running this inCluster then the account name will be required + newManifest.Spec.ServiceAccountName = "kube-vip" + } else { + // If this isn't inside a cluster then add the external path mount + adminConfMount := corev1.VolumeMount{ + Name: "kubeconfig", + MountPath: "/etc/kubernetes/admin.conf", + } + newManifest.Spec.Containers[0].VolumeMounts = append(newManifest.Spec.Containers[0].VolumeMounts, adminConfMount) + adminConfVolume := corev1.Volume{ + Name: "kubeconfig", + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: c.K8sConfigFile, }, - { - Name: "ca-certs", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/etc/ssl/certs", - }, - }, + }, + } + newManifest.Spec.Volumes = append(newManifest.Spec.Volumes, adminConfVolume) + // Add Host modification + + hostAlias := corev1.HostAlias{ + IP: "127.0.0.1", + Hostnames: []string{"kubernetes"}, + } + newManifest.Spec.HostAliases = append(newManifest.Spec.HostAliases, hostAlias) + } + + if c.ProviderConfig != "" { + providerConfigMount := corev1.VolumeMount{ + Name: "cloud-sa-volume", + MountPath: "/etc/cloud-sa", + ReadOnly: true, + } + newManifest.Spec.Containers[0].VolumeMounts = append(newManifest.Spec.Containers[0].VolumeMounts, providerConfigMount) + + providerConfigVolume := corev1.Volume{ + Name: "cloud-sa-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "metal-cloud-config", }, }, - HostNetwork: true, - }, + } + newManifest.Spec.Volumes = append(newManifest.Spec.Volumes, 
providerConfigVolume) + } - return newManifest + return newManifest } // GeneratePodManifestFromConfig will take a kube-vip config and generate a manifest -func GeneratePodManifestFromConfig(c *Config, imageVersion string) string { - newManifest := generatePodSpec(c, imageVersion) +func GeneratePodManifestFromConfig(c *Config, imageVersion string, inCluster bool) string { + newManifest := generatePodSpec(c, imageVersion, inCluster) b, _ := yaml.Marshal(newManifest) return string(b) } -// GenerateDeamonsetManifestFromConfig will take a kube-vip config and generate a manifest -func GenerateDeamonsetManifestFromConfig(c *Config, imageVersion string) string { +// GenerateDaemonsetManifestFromConfig will take a kube-vip config and generate a manifest +func GenerateDaemonsetManifestFromConfig(c *Config, imageVersion string, inCluster, taint bool) string { + // Determine where the pod should be deployed + var namespace string + if c.ServiceNamespace != "" { + namespace = c.ServiceNamespace + } else { + namespace = metav1.NamespaceSystem + } - podSpec := generatePodSpec(c, imageVersion).Spec + podSpec := generatePodSpec(c, imageVersion, inCluster).Spec newManifest := &appv1.DaemonSet{ TypeMeta: metav1.TypeMeta{ Kind: "DaemonSet", @@ -670,35 +614,65 @@ func GenerateDeamonsetManifestFromConfig(c *Config, imageVersion string) string }, ObjectMeta: metav1.ObjectMeta{ Name: "kube-vip-ds", - Namespace: "kube-system", + Namespace: namespace, + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + "app.kubernetes.io/version": imageVersion, + }, }, Spec: appv1.DaemonSetSpec{ Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{ - "name": "kube-vip-ds", + "app.kubernetes.io/name": "kube-vip-ds", }, }, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ - "name": "kube-vip-ds", + "app.kubernetes.io/name": "kube-vip-ds", + "app.kubernetes.io/version": imageVersion, }, }, Spec: podSpec, }, }, } - - newManifest.Spec.Template.Spec.Tolerations = []corev1.Toleration{ - { - Key: "node-role.kubernetes.io/master", - Effect: corev1.TaintEffectNoSchedule, - }, - } - newManifest.Spec.Template.Spec.NodeSelector = map[string]string{ - "node-role.kubernetes.io/master": "true", + if taint { + newManifest.Spec.Template.Spec.Tolerations = []corev1.Toleration{ + { + Effect: corev1.TaintEffectNoSchedule, + Operator: corev1.TolerationOpExists, + }, + { + Effect: corev1.TaintEffectNoExecute, + Operator: corev1.TolerationOpExists, + }, + } + newManifest.Spec.Template.Spec.Affinity = &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-role.kubernetes.io/master", + Operator: corev1.NodeSelectorOpExists, + }, + }, + }, + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "node-role.kubernetes.io/control-plane", + Operator: corev1.NodeSelectorOpExists, + }, + }, + }, + }, + }, + }, + } } - b, _ := yaml.Marshal(newManifest) return string(b) } diff --git a/pkg/kubevip/config_generator_test.go b/pkg/kubevip/config_generator_test.go new file mode 100644 index 00000000..f39bfb3a --- /dev/null +++ b/pkg/kubevip/config_generator_test.go @@ -0,0 +1,23 @@ +package kubevip + +import "testing" + +func TestParseEnvironment(t *testing.T) { + + tests := []struct { + name string + c *Config + wantErr bool + }{ + {"", nil, false}, + {"", &Config{Interface: "eth0", 
ServicesInterface: "eth1"}, false}, + } + for _, tt := range tests { + t.Logf("%v", tt.c) + t.Run(tt.name, func(t *testing.T) { + if err := ParseEnvironment(tt.c); (err != nil) != tt.wantErr { + t.Errorf("ParseEnvironment() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/pkg/kubevip/config_manager.go b/pkg/kubevip/config_manager.go index 93dcdbcd..be605298 100644 --- a/pkg/kubevip/config_manager.go +++ b/pkg/kubevip/config_manager.go @@ -2,182 +2,47 @@ package kubevip import ( "fmt" - "io/ioutil" - "os" - "strconv" - "strings" - "github.com/ghodss/yaml" log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" ) -var endPointIndex int // Holds the previous endpoint (for determining decisions on next endpoint) - -//ParseBackendConfig - -func ParseBackendConfig(ep string) (*BackEnd, error) { - endpoint := strings.Split(ep, ":") - if len(endpoint) != 2 { - return nil, fmt.Errorf("Ensure a backend is in in the format address:port, e.g. 10.0.0.1:8080") - } - p, err := strconv.Atoi(endpoint[1]) - if err != nil { - return nil, err - } - return &BackEnd{Address: endpoint[0], Port: p}, nil -} - -//ParsePeerConfig - -func ParsePeerConfig(ep string) (*RaftPeer, error) { - endpoint := strings.Split(ep, ":") - if len(endpoint) != 3 { - return nil, fmt.Errorf("Ensure a peer is in in the format id:address:port, e.g. server1:10.0.0.1:8080") - } - p, err := strconv.Atoi(endpoint[2]) - if err != nil { - return nil, err - } - return &RaftPeer{ID: endpoint[0], Address: endpoint[1], Port: p}, nil -} - -//OpenConfig will attempt to read a file and parse it's contents into a configuration -func OpenConfig(path string) (*Config, error) { - if path == "" { - return nil, fmt.Errorf("Path cannot be blank") - } - - log.Infof("Reading configuration from [%s]", path) - - // Check the actual path from the string - if _, err := os.Stat(path); !os.IsNotExist(err) { - // Attempt to read the data - configData, err := ioutil.ReadFile(path) - if err != nil { - return nil, err +func (c *Config) CheckInterface() error { + if c.Interface != "" { + if err := isValidInterface(c.Interface); err != nil { + return fmt.Errorf("%s is not valid interface, reason: %w", c.Interface, err) } - - // If data is read succesfully parse the yaml - var c Config - err = yaml.Unmarshal(configData, &c) - if err != nil { - return nil, err - } - return &c, nil - } - return nil, fmt.Errorf("Error reading [%s]", path) -} - -//PrintConfig - will print out an instance of the kubevip config -func (c *Config) PrintConfig() { - b, _ := yaml.Marshal(c) - fmt.Printf(string(b)) -} - -//ParseFlags will write the current configuration to a specified [path] -func (c *Config) ParseFlags(localPeer string, remotePeers, backends []string) error { - // Parse localPeer - p, err := ParsePeerConfig(localPeer) - if err != nil { - return err - } - c.LocalPeer = *p - - // Parse remotePeers - //Iterate backends - for i := range remotePeers { - p, err := ParsePeerConfig(remotePeers[i]) - if err != nil { - return err - - } - c.RemotePeers = append(c.RemotePeers, *p) - } - - //Iterate backends - for i := range backends { - b, err := ParseBackendConfig(backends[i]) - if err != nil { - return err + if c.ServicesInterface != "" { + if err := isValidInterface(c.ServicesInterface); err != nil { + return fmt.Errorf("%s is not valid interface, reason: %w", c.ServicesInterface, err) } - c.LoadBalancers[0].Backends = append(c.LoadBalancers[0].Backends, *b) } return nil } -//SampleConfig will create an example configuration and write it to the specified 
[path] -func SampleConfig() { - - // Generate Sample configuration - c := &Config{ - // Generate sample peers - RemotePeers: []RaftPeer{ - { - ID: "server2", - Address: "192.168.0.2", - Port: 10000, - }, - { - ID: "server3", - Address: "192.168.0.3", - Port: 10000, - }, - }, - LocalPeer: RaftPeer{ - ID: "server1", - Address: "192.168.0.1", - Port: 10000, - }, - // Virtual IP address - VIP: "192.168.0.100", - // Interface to bind to - Interface: "eth0", - // Load Balancer Configuration - LoadBalancers: []LoadBalancer{ - { - Name: "Kubernetes Control Plane", - Type: "http", - Port: 6443, - BindToVip: true, - Backends: []BackEnd{ - { - Address: "192.168.0.100", - Port: 6443, - }, - { - Address: "192.168.0.101", - Port: 6443, - }, - { - Address: "192.168.0.102", - Port: 6443, - }, - }, - }, - }, - } - b, _ := yaml.Marshal(c) - - fmt.Printf(string(b)) -} - -//WriteConfig will write the current configuration to a specified [path] -func (c *Config) WriteConfig(path string) error { - f, err := os.Create(path) +func isValidInterface(iface string) error { + l, err := netlink.LinkByName(iface) if err != nil { - return err + return fmt.Errorf("get %s failed, error: %w", iface, err) } - defer f.Close() + attrs := l.Attrs() - b, err := yaml.Marshal(c) - if err != nil { - return err - } - bytesWritten, err := f.Write(b) - if err != nil { - return err + // Some interfaces (included but not limited to lo and point-to-point + // interfaces) do not provide a operational status but are safe to use. + // From kernek.org: "Interface is in unknown state, neither driver nor + // userspace has set operational state. Interface must be considered for user + // data as setting operational state has not been implemented in every driver." + if attrs.OperState == netlink.OperUnknown { + log.Warningf( + "the status of the interface %s is unknown. 
Ensure your interface is ready to accept traffic, if so you can safely ignore this message", + iface, + ) + } else if attrs.OperState != netlink.OperUp { + return fmt.Errorf("%s is not up", iface) } - log.Debugf("wrote %d bytes\n", bytesWritten) + return nil } diff --git a/pkg/kubevip/config_types.go b/pkg/kubevip/config_types.go index 94324205..1edaa452 100644 --- a/pkg/kubevip/config_types.go +++ b/pkg/kubevip/config_types.go @@ -1,37 +1,92 @@ package kubevip import ( - "net/url" - - "github.com/plunder-app/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/bgp" ) -// Config defines all of the settings for the Virtual IP / Load-balancer +// Config defines all of the settings for the Kube-Vip Pod type Config struct { + // Logging, settings + Logging int `yaml:"logging"` + + // EnableARP, will use ARP to advertise the VIP address + EnableARP bool `yaml:"enableARP"` + + // EnableBGP, will use BGP to advertise the VIP address + EnableBGP bool `yaml:"enableBGP"` + + // EnableWireguard, will use wireguard to advertise the VIP address + EnableWireguard bool `yaml:"enableWireguard"` + + // EnableRoutingTable, will use the routing table to advertise the VIP address + EnableRoutingTable bool `yaml:"enableRoutingTable"` + + // EnableControlPlane, will enable the control plane functionality (used for hybrid behaviour) + EnableControlPlane bool `yaml:"enableControlPlane"` + + // DetectControlPlane, will attempt to find the control plane from loopback (127.0.0.1) + DetectControlPlane bool `yaml:"detectControlPlane"` + + // EnableServices, will enable the services functionality (used for hybrid behaviour) + EnableServices bool `yaml:"enableServices"` + + // EnableServicesElection, will enable leaderElection per service + EnableServicesElection bool `yaml:"enableServicesElection"` + + // EnableNodeLabeling, will enable node labeling as it becomes leader + EnableNodeLabeling bool `yaml:"enableNodeLabeling"` + + // LoadBalancerClassOnly, will enable load balancing only for services with LoadBalancerClass set to "kube-vip.io/kube-vip-class" + LoadBalancerClassOnly bool `yaml:"lbClassOnly"` + + // LoadBalancerClassName, will limit the load balancing services to services with LoadBalancerClass set to this value + LoadBalancerClassName string `yaml:"lbClassName"` - // LeaderElection defines the settings around Kubernetes LeaderElection - LeaderElection + // EnableServiceSecurity, will enable the use of iptables to secure services + EnableServiceSecurity bool `yaml:"EnableServiceSecurity"` - // LocalPeer is the configuration of this host - LocalPeer RaftPeer `yaml:"localPeer"` + // ArpBroadcastRate, defines how often kube-vip will update the network about updates to the network + ArpBroadcastRate int64 `yaml:"arpBroadcastRate"` - // Peers are all of the peers within the RAFT cluster - RemotePeers []RaftPeer `yaml:"remotePeers"` + // Annotations will define if we're going to wait and lookup configuration from Kubernetes node annotations + Annotations string + + // LeaderElectionType defines the backend to run the leader election: kubernetes or etcd. Defaults to kubernetes. + // Etcd doesn't support load balancer mode (EnableLoadBalancer=true) or any other feature that depends on the kube-api server. + LeaderElectionType string `yaml:"leaderElectionType"` + + // KubernetesLeaderElection defines the settings around Kubernetes KubernetesLeaderElection + KubernetesLeaderElection + + // Etcd defines all the settings for the etcd client. 
+ Etcd Etcd // AddPeersAsBackends, this will automatically add RAFT peers as backends to a loadbalancer AddPeersAsBackends bool `yaml:"addPeersAsBackends"` - // VIP is the Virtual IP address exposed for the cluster + // VIP is the Virtual IP address exposed for the cluster (TODO: deprecate) VIP string `yaml:"vip"` + // VipSubnet is the Subnet that is applied to the VIP + VIPSubnet string `yaml:"vipSubnet"` + // VIPCIDR is cidr range for the VIP (primarily needed for BGP) VIPCIDR string `yaml:"vipCidr"` // Address is the IP or DNS Name to use as a VirtualIP Address string `yaml:"address"` - // GratuitousARP will broadcast an ARP update when the VIP changes host - GratuitousARP bool `yaml:"gratuitousARP"` + // Listen port for the VirtualIP + Port int `yaml:"port"` + + // Namespace will define which namespace the control plane pods will run in + Namespace string `yaml:"namespace"` + + // Namespace will define which namespace the control plane pods will run in + ServiceNamespace string `yaml:"serviceNamespace"` + + // use DDNS to allocate IP when Address is set to a DNS Name + DDNS bool `yaml:"ddns"` // SingleNode will start the cluster as a single Node (Raft disabled) SingleNode bool `yaml:"singleNode"` @@ -42,35 +97,85 @@ type Config struct { // Interface is the network interface to bind to (default: First Adapter) Interface string `yaml:"interface,omitempty"` + // ServicesInterface is the network interface to bind to for services (optional) + ServicesInterface string `yaml:"servicesInterface,omitempty"` + // EnableLoadBalancer, provides the flexibility to make the load-balancer optional EnableLoadBalancer bool `yaml:"enableLoadBalancer"` - // EnableBGP, will use BGP to advertise the VIP address - EnableBGP bool `yaml:"enableBGP"` + // Listen port for the IPVS Service + LoadBalancerPort int `yaml:"lbPort"` + + // Forwarding method for the IPVS Service + LoadBalancerForwardingMethod string `yaml:"lbForwardingMethod"` + + // Routing Table ID for when using routing table mode + RoutingTableID int `yaml:"routingTableID"` + + // Routing Table Type, what sort of route should be added to the routing table + RoutingTableType int `yaml:"routingTableType"` // BGP Configuration BGPConfig bgp.Config BGPPeerConfig bgp.Peer + BGPPeers []string - // EnablePacket, will use the packet API to update the EIP <-> VIP (if BGP is enabled then BGP will be used) - EnablePacket bool `yaml:"enablePacket"` + // EnableMetal, will use the metal API to update the EIP <-> VIP (if BGP is enabled then BGP will be used) + EnableMetal bool `yaml:"enableMetal"` - // PacketAPIKey, is the API token used to authenticate to the API - PacketAPIKey string + // MetalAPIKey, is the API token used to authenticate to the API + MetalAPIKey string - // PacketProject, is the name of a particular defined project - PacketProject string + // MetalProject, is the name of a particular defined project + MetalProject string + + // MetalProjectID, is the name of a particular defined project + MetalProjectID string + + // ProviderConfig, is the path to a provider configuration file + ProviderConfig string // LoadBalancers are the various services we can load balance over LoadBalancers []LoadBalancer `yaml:"loadBalancers,omitempty"` -} -// LeaderElection defines all of the settings for Kubernetes LeaderElection -type LeaderElection struct { + // The hostport used to expose Prometheus metrics over an HTTP server + PrometheusHTTPServer string `yaml:"prometheusHTTPServer,omitempty"` + + // Egress configuration + + // EgressPodCidr, this contains the 
pod cidr range to ignore Egress + EgressPodCidr string + + // EgressServiceCidr, this contains the service cidr range to ignore + EgressServiceCidr string + + // EgressWithNftables, this will use the iptables-nftables OVER iptables + EgressWithNftables bool - // EnableLeaderElection will use the Kubernetes leader election algorithim + // ServicesLeaseName, this will set the lease name for services leader in arp mode + ServicesLeaseName string `yaml:"servicesLeaseName"` + + // K8sConfigFile, this is the path to the config file used to speak with the API server + K8sConfigFile string `yaml:"k8sConfigFile"` + + // DNSMode, this will set the mode DSN lookup will be performed (first, ipv4, ipv6, dual) + DNSMode string `yaml:"dnsDualStackMode"` + + // DisableServiceUpdates, if true, kube-vip will only advertise service, but it will not update service's Status.LoadBalancer.Ingress slice + DisableServiceUpdates bool `yaml:"disableServiceUpdates"` + + // EnableEndpointSlices, if enabled, EndpointSlices will be used instead of Endpoints + EnableEndpointSlices bool `yaml:"enableEndpointSlices"` +} + +// KubernetesLeaderElection defines all of the settings for Kubernetes KubernetesLeaderElection +type KubernetesLeaderElection struct { + // EnableLeaderElection will use the Kubernetes leader election algorithm EnableLeaderElection bool `yaml:"enableLeaderElection"` + // LeaseName - name of the lease for leader election + LeaseName string `yaml:"leaseName"` + // Lease Duration - length of time a lease can be held for LeaseDuration int @@ -79,18 +184,17 @@ type LeaderElection struct { // RetryPerion - Number of times the host will retry to hold a lease RetryPeriod int -} - -// RaftPeer details the configuration of all cluster peers -type RaftPeer struct { - // ID is the unique identifier a peer instance - ID string `yaml:"id"` - // IP Address of a peer instance - Address string `yaml:"address"` + // LeaseAnnotations - annotations which will be given to the lease object + LeaseAnnotations map[string]string +} - // Listening port of this peer instance - Port int `yaml:"port"` +// Etcd defines all the settings for the etcd client. 
+type Etcd struct { + CAFile string + ClientCertFile string + ClientKeyFile string + Endpoints []string } // LoadBalancer contains the configuration of a load balancing instance @@ -107,24 +211,6 @@ type LoadBalancer struct { // BindToVip will bind the load balancer port to the VIP itself BindToVip bool `yaml:"bindToVip"` - //BackendPort, is a port that all backends are listening on (To be used to simplify building a list of backends) - BackendPort int `yaml:"backendPort"` - - //Backends, is an array of backend servers - Backends []BackEnd `yaml:"backends"` -} - -// BackEnd is a server we will load balance over -type BackEnd struct { - // Backend Port to Load Balance to - Port int `yaml:"port"` - - // Address of a server/service - Address string `yaml:"address"` - - // URL is a raw URL to a backend service - RawURL string `yaml:"url,omitempty"` - - // ParsedURL - A validated URL to a backend - ParsedURL *url.URL `yaml:"parsedURL,omitempty"` + // Forwarding method of LoadBalancer, either Local, Tunnel, DirectRoute or Bypass + ForwardingMethod string `yaml:"forwardingMethod"` } diff --git a/pkg/leaderElection/OWNERS b/pkg/leaderElection/OWNERS deleted file mode 100644 index 44d93f84..00000000 --- a/pkg/leaderElection/OWNERS +++ /dev/null @@ -1,14 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- mikedanese -- timothysc -reviewers: -- wojtek-t -- deads2k -- mikedanese -- gmarek -- eparis -- timothysc -- ingvagabund -- resouer diff --git a/pkg/leaderElection/healthzadaptor.go b/pkg/leaderElection/healthzadaptor.go deleted file mode 100644 index b9353729..00000000 --- a/pkg/leaderElection/healthzadaptor.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package leaderelection - -import ( - "net/http" - "sync" - "time" -) - -// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object. -// It helps deal with the /healthz endpoint being set up prior to the LeaderElection. -// This contains the code needed to act as an adaptor between the leader -// election code the health check code. It allows us to provide health -// status about the leader election. Most specifically about if the leader -// has failed to renew without exiting the process. In that case we should -// report not healthy and rely on the kubelet to take down the process. -type HealthzAdaptor struct { - pointerLock sync.Mutex - le *LeaderElector - timeout time.Duration -} - -// Name returns the name of the health check we are implementing. -func (l *HealthzAdaptor) Name() string { - return "leaderElection" -} - -// Check is called by the healthz endpoint handler. -// It fails (returns an error) if we own the lease but had not been able to renew it. 
-func (l *HealthzAdaptor) Check(req *http.Request) error { - l.pointerLock.Lock() - defer l.pointerLock.Unlock() - if l.le == nil { - return nil - } - return l.le.Check(l.timeout) -} - -// SetLeaderElection ties a leader election object to a HealthzAdaptor -func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) { - l.pointerLock.Lock() - defer l.pointerLock.Unlock() - l.le = le -} - -// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election. -// timeout determines the time beyond the lease expiry to be allowed for timeout. -// checks within the timeout period after the lease expires will still return healthy. -func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor { - result := &HealthzAdaptor{ - timeout: timeout, - } - return result -} diff --git a/pkg/leaderElection/healthzadaptor_test.go b/pkg/leaderElection/healthzadaptor_test.go deleted file mode 100644 index d92f1336..00000000 --- a/pkg/leaderElection/healthzadaptor_test.go +++ /dev/null @@ -1,177 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package leaderelection - -import ( - "context" - "fmt" - "testing" - "time" - - "net/http" - - "k8s.io/apimachinery/pkg/util/clock" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" -) - -type fakeLock struct { - identity string -} - -// Get is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) Get(ctx context.Context) (ler *rl.LeaderElectionRecord, rawRecord []byte, err error) { - return nil, nil, nil -} - -// Create is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) Create(ctx context.Context, ler rl.LeaderElectionRecord) error { - return nil -} - -// Update is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) Update(ctx context.Context, ler rl.LeaderElectionRecord) error { - return nil -} - -// RecordEvent is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) RecordEvent(string) {} - -// Identity is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) Identity() string { - return fl.identity -} - -// Describe is a dummy to allow us to have a fakeLock for testing. -func (fl *fakeLock) Describe() string { - return "Dummy implementation of lock for testing" -} - -// TestLeaderElectionHealthChecker tests that the healthcheck for leader election handles its edge cases. 
-func TestLeaderElectionHealthChecker(t *testing.T) { - current := time.Now() - req := &http.Request{} - - tests := []struct { - description string - expected error - adaptorTimeout time.Duration - elector *LeaderElector - }{ - { - description: "call check before leader elector initialized", - expected: nil, - adaptorTimeout: time.Second * 20, - elector: nil, - }, - { - description: "call check when the lease is far expired", - expected: fmt.Errorf("failed election to renew leadership on lease %s", "foo"), - adaptorTimeout: time.Second * 20, - elector: &LeaderElector{ - config: LeaderElectionConfig{ - Lock: &fakeLock{identity: "healthTest"}, - LeaseDuration: time.Minute, - Name: "foo", - }, - observedRecord: rl.LeaderElectionRecord{ - HolderIdentity: "healthTest", - }, - observedTime: current, - clock: clock.NewFakeClock(current.Add(time.Hour)), - }, - }, - { - description: "call check when the lease is far expired but held by another server", - expected: nil, - adaptorTimeout: time.Second * 20, - elector: &LeaderElector{ - config: LeaderElectionConfig{ - Lock: &fakeLock{identity: "healthTest"}, - LeaseDuration: time.Minute, - Name: "foo", - }, - observedRecord: rl.LeaderElectionRecord{ - HolderIdentity: "otherServer", - }, - observedTime: current, - clock: clock.NewFakeClock(current.Add(time.Hour)), - }, - }, - { - description: "call check when the lease is not expired", - expected: nil, - adaptorTimeout: time.Second * 20, - elector: &LeaderElector{ - config: LeaderElectionConfig{ - Lock: &fakeLock{identity: "healthTest"}, - LeaseDuration: time.Minute, - Name: "foo", - }, - observedRecord: rl.LeaderElectionRecord{ - HolderIdentity: "healthTest", - }, - observedTime: current, - clock: clock.NewFakeClock(current), - }, - }, - { - description: "call check when the lease is expired but inside the timeout", - expected: nil, - adaptorTimeout: time.Second * 20, - elector: &LeaderElector{ - config: LeaderElectionConfig{ - Lock: &fakeLock{identity: "healthTest"}, - LeaseDuration: time.Minute, - Name: "foo", - }, - observedRecord: rl.LeaderElectionRecord{ - HolderIdentity: "healthTest", - }, - observedTime: current, - clock: clock.NewFakeClock(current.Add(time.Minute).Add(time.Second)), - }, - }, - } - - for _, test := range tests { - adaptor := NewLeaderHealthzAdaptor(test.adaptorTimeout) - if adaptor.le != nil { - t.Errorf("[%s] leaderChecker started with a LeaderElector %v", test.description, adaptor.le) - } - if test.elector != nil { - test.elector.config.WatchDog = adaptor - adaptor.SetLeaderElection(test.elector) - if adaptor.le == nil { - t.Errorf("[%s] adaptor failed to set the LeaderElector", test.description) - } - } - err := adaptor.Check(req) - if test.expected == nil { - if err == nil { - continue - } - t.Errorf("[%s] called check, expected no error but received \"%v\"", test.description, err) - } else { - if err == nil { - t.Errorf("[%s] called check and failed to received the expected error \"%v\"", test.description, test.expected) - } - if err.Error() != test.expected.Error() { - t.Errorf("[%s] called check, expected %v, received %v", test.description, test.expected, err) - } - } - } -} diff --git a/pkg/leaderElection/leaderelection.go b/pkg/leaderElection/leaderelection.go deleted file mode 100644 index 155b7222..00000000 --- a/pkg/leaderElection/leaderelection.go +++ /dev/null @@ -1,390 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package leaderelection implements leader election of a set of endpoints. -// It uses an annotation in the endpoints object to store the record of the -// election state. This implementation does not guarantee that only one -// client is acting as a leader (a.k.a. fencing). -// -// A client only acts on timestamps captured locally to infer the state of the -// leader election. The client does not consider timestamps in the leader -// election record to be accurate because these timestamps may not have been -// produced by a local clock. The implemention does not depend on their -// accuracy and only uses their change to indicate that another client has -// renewed the leader lease. Thus the implementation is tolerant to arbitrary -// clock skew, but is not tolerant to arbitrary clock skew rate. -// -// However the level of tolerance to skew rate can be configured by setting -// RenewDeadline and LeaseDuration appropriately. The tolerance expressed as a -// maximum tolerated ratio of time passed on the fastest node to time passed on -// the slowest node can be approximately achieved with a configuration that sets -// the same ratio of LeaseDuration to RenewDeadline. For example if a user wanted -// to tolerate some nodes progressing forward in time twice as fast as other nodes, -// the user could set LeaseDuration to 60 seconds and RenewDeadline to 30 seconds. -// -// While not required, some method of clock synchronization between nodes in the -// cluster is highly recommended. It's important to keep in mind when configuring -// this client that the tolerance to skew rate varies inversely to master -// availability. -// -// Larger clusters often have a more lenient SLA for API latency. This should be -// taken into account when configuring the client. The rate of leader transitions -// should be monitored and RetryPeriod and LeaseDuration should be increased -// until the rate is stable and acceptably low. It's important to keep in mind -// when configuring this client that the tolerance to API latency varies inversely -// to master availability. -// -// DISCLAIMER: this is an alpha API. This library will likely change significantly -// or even be removed entirely in subsequent releases. Depend on this API at -// your own risk. 
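To make the skew-rate guidance above concrete, the 2x example translates into a configuration roughly like the following sketch, using the LeaderElectionConfig and LeaderCallbacks types defined just below (the lock value and the 5-second retry period are assumptions for illustration):

// LeaseDuration/RenewDeadline = 60s/30s tolerates one node's clock running ~2x
// faster than another's; RetryPeriod must stay below RenewDeadline/JitterFactor.
lec := LeaderElectionConfig{
	Lock:          lock, // an rl.Interface such as a LeaseLock, assumed to be in scope
	LeaseDuration: 60 * time.Second,
	RenewDeadline: 30 * time.Second,
	RetryPeriod:   5 * time.Second,
	Callbacks: LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) { /* start leader-only work */ },
		OnStoppedLeading: func() { /* stop it immediately */ },
	},
}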
-package leaderelection - -import ( - "bytes" - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" - - "k8s.io/klog" -) - -const ( - JitterFactor = 1.2 -) - -// NewLeaderElector creates a LeaderElector from a LeaderElectionConfig -func NewLeaderElector(lec LeaderElectionConfig) (*LeaderElector, error) { - if lec.LeaseDuration <= lec.RenewDeadline { - return nil, fmt.Errorf("leaseDuration must be greater than renewDeadline") - } - if lec.RenewDeadline <= time.Duration(JitterFactor*float64(lec.RetryPeriod)) { - return nil, fmt.Errorf("renewDeadline must be greater than retryPeriod*JitterFactor") - } - if lec.LeaseDuration < 1 { - return nil, fmt.Errorf("leaseDuration must be greater than zero") - } - if lec.RenewDeadline < 1 { - return nil, fmt.Errorf("renewDeadline must be greater than zero") - } - if lec.RetryPeriod < 1 { - return nil, fmt.Errorf("retryPeriod must be greater than zero") - } - if lec.Callbacks.OnStartedLeading == nil { - return nil, fmt.Errorf("OnStartedLeading callback must not be nil") - } - if lec.Callbacks.OnStoppedLeading == nil { - return nil, fmt.Errorf("OnStoppedLeading callback must not be nil") - } - - if lec.Lock == nil { - return nil, fmt.Errorf("Lock must not be nil.") - } - le := LeaderElector{ - config: lec, - clock: clock.RealClock{}, - metrics: globalMetricsFactory.newLeaderMetrics(), - } - le.metrics.leaderOff(le.config.Name) - return &le, nil -} - -type LeaderElectionConfig struct { - // Lock is the resource that will be used for locking - Lock rl.Interface - - // LeaseDuration is the duration that non-leader candidates will - // wait to force acquire leadership. This is measured against time of - // last observed ack. - // - // A client needs to wait a full LeaseDuration without observing a change to - // the record before it can attempt to take over. When all clients are - // shutdown and a new set of clients are started with different names against - // the same leader record, they must wait the full LeaseDuration before - // attempting to acquire the lease. Thus LeaseDuration should be as short as - // possible (within your tolerance for clock skew rate) to avoid a possible - // long waits in the scenario. - // - // Core clients default this value to 15 seconds. - LeaseDuration time.Duration - // RenewDeadline is the duration that the acting master will retry - // refreshing leadership before giving up. - // - // Core clients default this value to 10 seconds. - RenewDeadline time.Duration - // RetryPeriod is the duration the LeaderElector clients should wait - // between tries of actions. - // - // Core clients default this value to 2 seconds. - RetryPeriod time.Duration - - // Callbacks are callbacks that are triggered during certain lifecycle - // events of the LeaderElector - Callbacks LeaderCallbacks - - // WatchDog is the associated health checker - // WatchDog may be null if its not needed/configured. - WatchDog *HealthzAdaptor - - // ReleaseOnCancel should be set true if the lock should be released - // when the run context is cancelled. If you set this to true, you must - // ensure all code guarded by this lease has successfully completed - // prior to cancelling the context, or you may have two processes - // simultaneously acting on the critical path. 
- ReleaseOnCancel bool - - // Name is the name of the resource lock for debugging - Name string -} - -// LeaderCallbacks are callbacks that are triggered during certain -// lifecycle events of the LeaderElector. These are invoked asynchronously. -// -// possible future callbacks: -// * OnChallenge() -type LeaderCallbacks struct { - // OnStartedLeading is called when a LeaderElector client starts leading - OnStartedLeading func(context.Context) - // OnStoppedLeading is called when a LeaderElector client stops leading - OnStoppedLeading func() - // OnNewLeader is called when the client observes a leader that is - // not the previously observed leader. This includes the first observed - // leader when the client starts. - OnNewLeader func(identity string) -} - -// LeaderElector is a leader election client. -type LeaderElector struct { - config LeaderElectionConfig - // internal bookkeeping - observedRecord rl.LeaderElectionRecord - observedRawRecord []byte - observedTime time.Time - // used to implement OnNewLeader(), may lag slightly from the - // value observedRecord.HolderIdentity if the transition has - // not yet been reported. - reportedLeader string - - // clock is wrapper around time to allow for less flaky testing - clock clock.Clock - - metrics leaderMetricsAdapter - - // name is the name of the resource lock for debugging - name string -} - -// Run starts the leader election loop -func (le *LeaderElector) Run(ctx context.Context) { - defer func() { - runtime.HandleCrash() - le.config.Callbacks.OnStoppedLeading() - }() - if !le.acquire(ctx) { - return // ctx signalled done - } - ctx, cancel := context.WithCancel(ctx) - defer cancel() - go le.config.Callbacks.OnStartedLeading(ctx) - le.renew(ctx) -} - -// RunOrDie starts a client with the provided config or panics if the config -// fails to validate. -func RunOrDie(ctx context.Context, lec LeaderElectionConfig) { - le, err := NewLeaderElector(lec) - if err != nil { - panic(err) - } - if lec.WatchDog != nil { - lec.WatchDog.SetLeaderElection(le) - } - le.Run(ctx) -} - -// GetLeader returns the identity of the last observed leader or returns the empty string if -// no leader has yet been observed. -func (le *LeaderElector) GetLeader() string { - return le.observedRecord.HolderIdentity -} - -// IsLeader returns true if the last observed leader was this client else returns false. -func (le *LeaderElector) IsLeader() bool { - return le.observedRecord.HolderIdentity == le.config.Lock.Identity() -} - -// acquire loops calling tryAcquireOrRenew and returns true immediately when tryAcquireOrRenew succeeds. -// Returns false if ctx signals done. -func (le *LeaderElector) acquire(ctx context.Context) bool { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - succeeded := false - desc := le.config.Lock.Describe() - klog.Infof("attempting to acquire leader lease %v...", desc) - wait.JitterUntil(func() { - succeeded = le.tryAcquireOrRenew(ctx) - le.maybeReportTransition() - if !succeeded { - klog.V(4).Infof("failed to acquire lease %v", desc) - return - } - le.config.Lock.RecordEvent("became leader") - le.metrics.leaderOn(le.config.Name) - klog.Infof("successfully acquired lease %v", desc) - cancel() - }, le.config.RetryPeriod, JitterFactor, true, ctx.Done()) - return succeeded -} - -// renew loops calling tryAcquireOrRenew and returns immediately when tryAcquireOrRenew fails or ctx signals done. 
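From the caller's side, the Run/RunOrDie flow above amounts to: acquire blocks until the lease is won (or the context ends), OnStartedLeading then runs in its own goroutine while renew keeps refreshing the lock, and OnStoppedLeading fires once leadership is lost. A hedged sketch, using the core-client default durations quoted in the field comments, with an assumed lock and logger:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

RunOrDie(ctx, LeaderElectionConfig{
	Lock:            lock, // assumed rl.Interface
	LeaseDuration:   15 * time.Second,
	RenewDeadline:   10 * time.Second,
	RetryPeriod:     2 * time.Second,
	ReleaseOnCancel: true, // hand the lease back when ctx is cancelled
	Callbacks: LeaderCallbacks{
		OnStartedLeading: func(ctx context.Context) { log.Info("started leading") },
		OnStoppedLeading: func() { log.Info("stopped leading") },
		OnNewLeader:      func(id string) { log.Infof("observed leader: %s", id) },
	},
})
// RunOrDie only returns after leadership is lost or the context is done.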
-func (le *LeaderElector) renew(ctx context.Context) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - wait.Until(func() { - timeoutCtx, timeoutCancel := context.WithTimeout(ctx, le.config.RenewDeadline) - defer timeoutCancel() - err := wait.PollImmediateUntil(le.config.RetryPeriod, func() (bool, error) { - return le.tryAcquireOrRenew(timeoutCtx), nil - }, timeoutCtx.Done()) - - le.maybeReportTransition() - desc := le.config.Lock.Describe() - if err == nil { - klog.V(5).Infof("successfully renewed lease %v", desc) - return - } - le.config.Lock.RecordEvent("stopped leading") - le.metrics.leaderOff(le.config.Name) - klog.Infof("failed to renew lease %v: %v", desc, err) - cancel() - }, le.config.RetryPeriod, ctx.Done()) - - // if we hold the lease, give it up - if le.config.ReleaseOnCancel { - le.release() - } -} - -// release attempts to release the leader lease if we have acquired it. -func (le *LeaderElector) release() bool { - if !le.IsLeader() { - return true - } - leaderElectionRecord := rl.LeaderElectionRecord{ - LeaseDurationSeconds: le.observedRecord.LeaseDurationSeconds, - LeaderTransitions: le.observedRecord.LeaderTransitions, - } - if err := le.config.Lock.Update(context.TODO(), leaderElectionRecord); err != nil { - klog.Errorf("Failed to release lock: %v", err) - return false - } - le.observedRecord = leaderElectionRecord - le.observedTime = le.clock.Now() - return true -} - -// tryAcquireOrRenew tries to acquire a leader lease if it is not already acquired, -// else it tries to renew the lease if it has already been acquired. Returns true -// on success else returns false. -func (le *LeaderElector) tryAcquireOrRenew(ctx context.Context) bool { - now := metav1.Now() - leaderElectionRecord := rl.LeaderElectionRecord{ - HolderIdentity: le.config.Lock.Identity(), - LeaseDurationSeconds: int(le.config.LeaseDuration / time.Second), - RenewTime: now, - AcquireTime: now, - } - - // 1. obtain or create the ElectionRecord - oldLeaderElectionRecord, oldLeaderElectionRawRecord, err := le.config.Lock.Get(ctx) - if err != nil { - if !errors.IsNotFound(err) { - klog.Errorf("error retrieving resource lock %v: %v", le.config.Lock.Describe(), err) - return false - } - if err = le.config.Lock.Create(ctx, leaderElectionRecord); err != nil { - klog.Errorf("error initially creating leader election record: %v", err) - return false - } - le.observedRecord = leaderElectionRecord - le.observedTime = le.clock.Now() - return true - } - - // 2. Record obtained, check the Identity & Time - if !bytes.Equal(le.observedRawRecord, oldLeaderElectionRawRecord) { - le.observedRecord = *oldLeaderElectionRecord - le.observedRawRecord = oldLeaderElectionRawRecord - le.observedTime = le.clock.Now() - } - if len(oldLeaderElectionRecord.HolderIdentity) > 0 && - le.observedTime.Add(le.config.LeaseDuration).After(now.Time) && - !le.IsLeader() { - klog.V(4).Infof("lock is held by %v and has not yet expired", oldLeaderElectionRecord.HolderIdentity) - return false - } - - // 3. We're going to try to update. The leaderElectionRecord is set to it's default - // here. Let's correct it before updating. 
- if le.IsLeader() { - leaderElectionRecord.AcquireTime = oldLeaderElectionRecord.AcquireTime - leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions - } else { - leaderElectionRecord.LeaderTransitions = oldLeaderElectionRecord.LeaderTransitions + 1 - } - - // update the lock itself - if err = le.config.Lock.Update(ctx, leaderElectionRecord); err != nil { - klog.Errorf("Failed to update lock: %v", err) - return false - } - - le.observedRecord = leaderElectionRecord - le.observedTime = le.clock.Now() - return true -} - -func (le *LeaderElector) maybeReportTransition() { - if le.observedRecord.HolderIdentity == le.reportedLeader { - return - } - le.reportedLeader = le.observedRecord.HolderIdentity - if le.config.Callbacks.OnNewLeader != nil { - go le.config.Callbacks.OnNewLeader(le.reportedLeader) - } -} - -// Check will determine if the current lease is expired by more than timeout. -func (le *LeaderElector) Check(maxTolerableExpiredLease time.Duration) error { - if !le.IsLeader() { - // Currently not concerned with the case that we are hot standby - return nil - } - // If we are more than timeout seconds after the lease duration that is past the timeout - // on the lease renew. Time to start reporting ourselves as unhealthy. We should have - // died but conditions like deadlock can prevent this. (See #70819) - if le.clock.Since(le.observedTime) > le.config.LeaseDuration+maxTolerableExpiredLease { - return fmt.Errorf("failed election to renew leadership on lease %s", le.config.Name) - } - - return nil -} diff --git a/pkg/leaderElection/leaderelection_test.go b/pkg/leaderElection/leaderelection_test.go deleted file mode 100644 index 10acfa7d..00000000 --- a/pkg/leaderElection/leaderelection_test.go +++ /dev/null @@ -1,919 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package leaderelection - -import ( - "context" - "encoding/json" - "fmt" - "sync" - "testing" - "time" - - coordinationv1 "k8s.io/api/coordination/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/apimachinery/pkg/util/diff" - "k8s.io/client-go/kubernetes/fake" - fakeclient "k8s.io/client-go/testing" - rl "k8s.io/client-go/tools/leaderelection/resourcelock" - "k8s.io/client-go/tools/record" -) - -func createLockObject(t *testing.T, objectType, namespace, name string, record *rl.LeaderElectionRecord) (obj runtime.Object) { - objectMeta := metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - } - if record != nil { - recordBytes, _ := json.Marshal(record) - objectMeta.Annotations = map[string]string{ - rl.LeaderElectionRecordAnnotationKey: string(recordBytes), - } - } - switch objectType { - case "endpoints": - obj = &corev1.Endpoints{ObjectMeta: objectMeta} - case "configmaps": - obj = &corev1.ConfigMap{ObjectMeta: objectMeta} - case "leases": - var spec coordinationv1.LeaseSpec - if record != nil { - spec = rl.LeaderElectionRecordToLeaseSpec(record) - } - obj = &coordinationv1.Lease{ObjectMeta: objectMeta, Spec: spec} - default: - t.Fatal("unexpected objType:" + objectType) - } - return -} - -// Will test leader election using endpoints as the resource -func TestTryAcquireOrRenewEndpoints(t *testing.T) { - testTryAcquireOrRenew(t, "endpoints") -} - -type Reactor struct { - verb string - objectType string - reaction fakeclient.ReactionFunc -} - -func testTryAcquireOrRenew(t *testing.T, objectType string) { - future := time.Now().Add(1000 * time.Hour) - past := time.Now().Add(-1000 * time.Hour) - - tests := []struct { - name string - observedRecord rl.LeaderElectionRecord - observedTime time.Time - reactors []Reactor - - expectSuccess bool - transitionLeader bool - outHolder string - }{ - { - name: "acquire from no object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName()) - }, - }, - { - verb: "create", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - expectSuccess: true, - outHolder: "baz", - }, - { - name: "acquire from object without annotations", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), nil), nil - }, - }, - { - verb: "update", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from unled object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{}), nil - }, - }, - { - verb: "update", - reaction: func(action 
fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from led, unacked object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "update", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedTime: past, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from empty led, acked object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: ""}), nil - }, - }, - { - verb: "update", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - observedTime: future, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "don't acquire from led, acked object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - }, - observedTime: future, - - expectSuccess: false, - outHolder: "bing", - }, - { - name: "renew already acquired object", - reactors: []Reactor{ - { - verb: "get", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, objectType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - { - verb: "update", - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - observedTime: future, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "baz"}, - - expectSuccess: true, - outHolder: "baz", - }, - } - - for i := range tests { - test := &tests[i] - t.Run(test.name, func(t *testing.T) { - // OnNewLeader is called async so we have to wait for it. - var wg sync.WaitGroup - wg.Add(1) - var reportedLeader string - var lock rl.Interface - - objectMeta := metav1.ObjectMeta{Namespace: "foo", Name: "bar"} - resourceLockConfig := rl.ResourceLockConfig{ - Identity: "baz", - EventRecorder: &record.FakeRecorder{}, - } - c := &fake.Clientset{} - for _, reactor := range test.reactors { - c.AddReactor(reactor.verb, objectType, reactor.reaction) - } - c.AddReactor("*", "*", func(action fakeclient.Action) (bool, runtime.Object, error) { - t.Errorf("unreachable action. 
testclient called too many times: %+v", action) - return true, nil, fmt.Errorf("unreachable action") - }) - - switch objectType { - case "endpoints": - lock = &rl.EndpointsLock{ - EndpointsMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoreV1(), - } - case "configmaps": - lock = &rl.ConfigMapLock{ - ConfigMapMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoreV1(), - } - case "leases": - lock = &rl.LeaseLock{ - LeaseMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoordinationV1(), - } - } - - lec := LeaderElectionConfig{ - Lock: lock, - LeaseDuration: 10 * time.Second, - Callbacks: LeaderCallbacks{ - OnNewLeader: func(l string) { - defer wg.Done() - reportedLeader = l - }, - }, - } - observedRawRecord := GetRawRecordOrDie(t, objectType, test.observedRecord) - le := &LeaderElector{ - config: lec, - observedRecord: test.observedRecord, - observedRawRecord: observedRawRecord, - observedTime: test.observedTime, - clock: clock.RealClock{}, - } - if test.expectSuccess != le.tryAcquireOrRenew(context.Background()) { - t.Errorf("unexpected result of tryAcquireOrRenew: [succeeded=%v]", !test.expectSuccess) - } - - le.observedRecord.AcquireTime = metav1.Time{} - le.observedRecord.RenewTime = metav1.Time{} - if le.observedRecord.HolderIdentity != test.outHolder { - t.Errorf("expected holder:\n\t%+v\ngot:\n\t%+v", test.outHolder, le.observedRecord.HolderIdentity) - } - if len(test.reactors) != len(c.Actions()) { - t.Errorf("wrong number of api interactions") - } - if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 { - t.Errorf("leader should have transitioned but did not") - } - if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 { - t.Errorf("leader should not have transitioned but did") - } - - le.maybeReportTransition() - wg.Wait() - if reportedLeader != test.outHolder { - t.Errorf("reported leader was not the new leader. 
expected %q, got %q", test.outHolder, reportedLeader) - } - }) - } -} - -// Will test leader election using configmap as the resource -func TestTryAcquireOrRenewConfigMaps(t *testing.T) { - testTryAcquireOrRenew(t, "configmaps") -} - -// Will test leader election using lease as the resource -func TestTryAcquireOrRenewLeases(t *testing.T) { - testTryAcquireOrRenew(t, "leases") -} - -func TestLeaseSpecToLeaderElectionRecordRoundTrip(t *testing.T) { - holderIdentity := "foo" - leaseDurationSeconds := int32(10) - leaseTransitions := int32(1) - oldSpec := coordinationv1.LeaseSpec{ - HolderIdentity: &holderIdentity, - LeaseDurationSeconds: &leaseDurationSeconds, - AcquireTime: &metav1.MicroTime{time.Now()}, - RenewTime: &metav1.MicroTime{time.Now()}, - LeaseTransitions: &leaseTransitions, - } - - oldRecord := rl.LeaseSpecToLeaderElectionRecord(&oldSpec) - newSpec := rl.LeaderElectionRecordToLeaseSpec(oldRecord) - - if !equality.Semantic.DeepEqual(oldSpec, newSpec) { - t.Errorf("diff: %v", diff.ObjectReflectDiff(oldSpec, newSpec)) - } - - newRecord := rl.LeaseSpecToLeaderElectionRecord(&newSpec) - - if !equality.Semantic.DeepEqual(oldRecord, newRecord) { - t.Errorf("diff: %v", diff.ObjectReflectDiff(oldRecord, newRecord)) - } -} - -func multiLockType(t *testing.T, objectType string) (primaryType, secondaryType string) { - switch objectType { - case rl.EndpointsLeasesResourceLock: - return rl.EndpointsResourceLock, rl.LeasesResourceLock - case rl.ConfigMapsLeasesResourceLock: - return rl.ConfigMapsResourceLock, rl.LeasesResourceLock - default: - t.Fatal("unexpected objType:" + objectType) - } - return -} - -func GetRawRecordOrDie(t *testing.T, objectType string, ler rl.LeaderElectionRecord) (ret []byte) { - var err error - switch objectType { - case "endpoints", "configmaps", "leases": - ret, err = json.Marshal(ler) - if err != nil { - t.Fatalf("lock %s get raw record %v failed: %v", objectType, ler, err) - } - case "endpointsleases", "configmapsleases": - recordBytes, err := json.Marshal(ler) - if err != nil { - t.Fatalf("lock %s get raw record %v failed: %v", objectType, ler, err) - } - ret = rl.ConcatRawRecord(recordBytes, recordBytes) - default: - t.Fatal("unexpected objType:" + objectType) - } - return -} - -func testTryAcquireOrRenewMultiLock(t *testing.T, objectType string) { - future := time.Now().Add(1000 * time.Hour) - past := time.Now().Add(-1000 * time.Hour) - primaryType, secondaryType := multiLockType(t, objectType) - tests := []struct { - name string - observedRecord rl.LeaderElectionRecord - observedRawRecord []byte - observedTime time.Time - reactors []Reactor - - expectSuccess bool - transitionLeader bool - outHolder string - }{ - { - name: "acquire from no object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName()) - }, - }, - { - verb: "create", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - { - verb: "create", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - expectSuccess: true, - outHolder: "baz", - }, - { - name: "acquire from unled 
old object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName()) - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName()) - }, - }, - { - verb: "create", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from unled transition object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{}), nil - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{}), nil - }, - }, - { - verb: "update", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - }, - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from led, unack old object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), 
action.(fakeclient.GetAction).GetName()) - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "create", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.CreateAction).GetObject(), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, primaryType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: past, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from led, unack transition object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "update", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, objectType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: past, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "acquire from conflict led, ack transition object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - }, - observedRecord: 
rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, objectType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: future, - - expectSuccess: false, - outHolder: rl.UnknownLeader, - }, - { - name: "acquire from led, unack unknown object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: rl.UnknownLeader}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: rl.UnknownLeader}), nil - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: rl.UnknownLeader}), nil - }, - }, - { - verb: "update", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: rl.UnknownLeader}, - observedRawRecord: GetRawRecordOrDie(t, objectType, rl.LeaderElectionRecord{HolderIdentity: rl.UnknownLeader}), - observedTime: past, - - expectSuccess: true, - transitionLeader: true, - outHolder: "baz", - }, - { - name: "don't acquire from led, ack old object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, nil, errors.NewNotFound(action.(fakeclient.GetAction).GetResource().GroupResource(), action.(fakeclient.GetAction).GetName()) - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, primaryType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: future, - - expectSuccess: false, - outHolder: "bing", - }, - { - name: "don't acquire from led, acked new object, observe new record", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, 
secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, secondaryType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: future, - - expectSuccess: false, - outHolder: rl.UnknownLeader, - }, - { - name: "don't acquire from led, acked new object, observe transition record", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "bing"}), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "bing"}, - observedRawRecord: GetRawRecordOrDie(t, objectType, rl.LeaderElectionRecord{HolderIdentity: "bing"}), - observedTime: future, - - expectSuccess: false, - outHolder: "bing", - }, - { - name: "renew already required object", - reactors: []Reactor{ - { - verb: "get", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, primaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - { - verb: "update", - objectType: primaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - { - verb: "get", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, createLockObject(t, secondaryType, action.GetNamespace(), action.(fakeclient.GetAction).GetName(), &rl.LeaderElectionRecord{HolderIdentity: "baz"}), nil - }, - }, - { - verb: "update", - objectType: secondaryType, - reaction: func(action fakeclient.Action) (handled bool, ret runtime.Object, err error) { - return true, action.(fakeclient.UpdateAction).GetObject(), nil - }, - }, - }, - observedRecord: rl.LeaderElectionRecord{HolderIdentity: "baz"}, - observedRawRecord: GetRawRecordOrDie(t, objectType, rl.LeaderElectionRecord{HolderIdentity: "baz"}), - observedTime: future, - - expectSuccess: true, - outHolder: "baz", - }, - } - - for i := range tests { - test := &tests[i] - t.Run(test.name, func(t *testing.T) { - // OnNewLeader is called async so we have to wait for it. 
- var wg sync.WaitGroup - wg.Add(1) - var reportedLeader string - var lock rl.Interface - - objectMeta := metav1.ObjectMeta{Namespace: "foo", Name: "bar"} - resourceLockConfig := rl.ResourceLockConfig{ - Identity: "baz", - EventRecorder: &record.FakeRecorder{}, - } - c := &fake.Clientset{} - for _, reactor := range test.reactors { - c.AddReactor(reactor.verb, reactor.objectType, reactor.reaction) - } - c.AddReactor("*", "*", func(action fakeclient.Action) (bool, runtime.Object, error) { - t.Errorf("unreachable action. testclient called too many times: %+v", action) - return true, nil, fmt.Errorf("unreachable action") - }) - - switch objectType { - case rl.EndpointsLeasesResourceLock: - lock = &rl.MultiLock{ - Primary: &rl.EndpointsLock{ - EndpointsMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoreV1(), - }, - Secondary: &rl.LeaseLock{ - LeaseMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoordinationV1(), - }, - } - case rl.ConfigMapsLeasesResourceLock: - lock = &rl.MultiLock{ - Primary: &rl.ConfigMapLock{ - ConfigMapMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoreV1(), - }, - Secondary: &rl.LeaseLock{ - LeaseMeta: objectMeta, - LockConfig: resourceLockConfig, - Client: c.CoordinationV1(), - }, - } - } - - lec := LeaderElectionConfig{ - Lock: lock, - LeaseDuration: 10 * time.Second, - Callbacks: LeaderCallbacks{ - OnNewLeader: func(l string) { - defer wg.Done() - reportedLeader = l - }, - }, - } - le := &LeaderElector{ - config: lec, - observedRecord: test.observedRecord, - observedRawRecord: test.observedRawRecord, - observedTime: test.observedTime, - clock: clock.RealClock{}, - } - if test.expectSuccess != le.tryAcquireOrRenew(context.Background()) { - t.Errorf("unexpected result of tryAcquireOrRenew: [succeeded=%v]", !test.expectSuccess) - } - - le.observedRecord.AcquireTime = metav1.Time{} - le.observedRecord.RenewTime = metav1.Time{} - if le.observedRecord.HolderIdentity != test.outHolder { - t.Errorf("expected holder:\n\t%+v\ngot:\n\t%+v", test.outHolder, le.observedRecord.HolderIdentity) - } - if len(test.reactors) != len(c.Actions()) { - t.Errorf("wrong number of api interactions") - } - if test.transitionLeader && le.observedRecord.LeaderTransitions != 1 { - t.Errorf("leader should have transitioned but did not") - } - if !test.transitionLeader && le.observedRecord.LeaderTransitions != 0 { - t.Errorf("leader should not have transitioned but did") - } - - le.maybeReportTransition() - wg.Wait() - if reportedLeader != test.outHolder { - t.Errorf("reported leader was not the new leader. expected %q, got %q", test.outHolder, reportedLeader) - } - }) - } -} - -// Will test leader election using endpointsleases as the resource -func TestTryAcquireOrRenewEndpointsLeases(t *testing.T) { - testTryAcquireOrRenewMultiLock(t, "endpointsleases") -} - -// Will test leader election using configmapsleases as the resource -func TestTryAcquireOrRenewConfigMapsLeases(t *testing.T) { - testTryAcquireOrRenewMultiLock(t, "configmapsleases") -} diff --git a/pkg/leaderElection/metrics.go b/pkg/leaderElection/metrics.go deleted file mode 100644 index 65917bf8..00000000 --- a/pkg/leaderElection/metrics.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package leaderelection - -import ( - "sync" -) - -// This file provides abstractions for setting the provider (e.g., prometheus) -// of metrics. - -type leaderMetricsAdapter interface { - leaderOn(name string) - leaderOff(name string) -} - -// GaugeMetric represents a single numerical value that can arbitrarily go up -// and down. -type SwitchMetric interface { - On(name string) - Off(name string) -} - -type noopMetric struct{} - -func (noopMetric) On(name string) {} -func (noopMetric) Off(name string) {} - -// defaultLeaderMetrics expects the caller to lock before setting any metrics. -type defaultLeaderMetrics struct { - // leader's value indicates if the current process is the owner of name lease - leader SwitchMetric -} - -func (m *defaultLeaderMetrics) leaderOn(name string) { - if m == nil { - return - } - m.leader.On(name) -} - -func (m *defaultLeaderMetrics) leaderOff(name string) { - if m == nil { - return - } - m.leader.Off(name) -} - -type noMetrics struct{} - -func (noMetrics) leaderOn(name string) {} -func (noMetrics) leaderOff(name string) {} - -// MetricsProvider generates various metrics used by the leader election. -type MetricsProvider interface { - NewLeaderMetric() SwitchMetric -} - -type noopMetricsProvider struct{} - -func (_ noopMetricsProvider) NewLeaderMetric() SwitchMetric { - return noopMetric{} -} - -var globalMetricsFactory = leaderMetricsFactory{ - metricsProvider: noopMetricsProvider{}, -} - -type leaderMetricsFactory struct { - metricsProvider MetricsProvider - - onlyOnce sync.Once -} - -func (f *leaderMetricsFactory) setProvider(mp MetricsProvider) { - f.onlyOnce.Do(func() { - f.metricsProvider = mp - }) -} - -func (f *leaderMetricsFactory) newLeaderMetrics() leaderMetricsAdapter { - mp := f.metricsProvider - if mp == (noopMetricsProvider{}) { - return noMetrics{} - } - return &defaultLeaderMetrics{ - leader: mp.NewLeaderMetric(), - } -} - -// SetProvider sets the metrics provider for all subsequently created work -// queues. Only the first call has an effect. -func SetProvider(metricsProvider MetricsProvider) { - globalMetricsFactory.setProvider(metricsProvider) -} diff --git a/pkg/loadbalancer/ipvs.go b/pkg/loadbalancer/ipvs.go new file mode 100644 index 00000000..af93b2e2 --- /dev/null +++ b/pkg/loadbalancer/ipvs.go @@ -0,0 +1,192 @@ +package loadbalancer + +import ( + "fmt" + "net" + "net/netip" + "strings" + + "github.com/cloudflare/ipvs" + log "github.com/sirupsen/logrus" +) + +/* +IPVS Architecture - for those that are interested + +There are going to be a large number of end users that are using a VIP that exists within the same subnet +as the back end servers. This unfortunately will result in "packet" confusion with the destingation and +source becoming messed up by the IPVS NAT. + +The solution is to perform two things ! + +First: +Set up kube-vip TCP port forwarder from the VIP:PORT to the IPVS:PORT + +Second: +Start up a node watcher and a IPVS load balancer, the node balancer is responsible for adding/removing +the nodes from the IPVS load-balancer. 
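Taken together, that description maps onto a small exported surface in this new file: NewIPVSLB builds the virtual service for the VIP, AddBackend/RemoveBackend track the control-plane nodes, and RemoveIPVSLB tears the service down again. A hypothetical sketch of how a node watcher might drive it (the helper name, the VIP 192.168.0.40, the node addresses and port 6443 are invented for illustration):

// Hypothetical wiring, not part of this change: roughly how a node watcher
// drives the functions defined below.
func syncControlPlaneBackends(nodeIPs []string) error {
	lb, err := NewIPVSLB("192.168.0.40", 6443, "local") // VIP, port, forwarding method
	if err != nil {
		return err
	}
	// The first AddBackend creates the IPVS virtual service; later calls only add destinations.
	for _, ip := range nodeIPs {
		if err := lb.AddBackend(ip, 6443); err != nil {
			return err
		}
	}
	// A departing node maps to lb.RemoveBackend(ip, 6443); losing leadership maps to
	// lb.RemoveIPVSLB(), which deletes the whole virtual service.
	return nil
}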
+ +*/ + +const ( + ROUNDROBIN = "rr" +) + +type IPVSLoadBalancer struct { + client ipvs.Client + loadBalancerService ipvs.Service + Port int + forwardingMethod ipvs.ForwardType +} + +func NewIPVSLB(address string, port int, forwardingMethod string) (*IPVSLoadBalancer, error) { + // Create IPVS client + c, err := ipvs.New() + if err != nil { + log.Errorf("ensure IPVS kernel modules are loaded") + log.Fatalf("Error starting IPVS [%v]", err) + } + i, err := c.Info() + if err != nil { + log.Errorf("ensure IPVS kernel modules are loaded") + log.Fatalf("Error getting IPVS version [%v]", err) + } + log.Infof("IPVS Loadbalancer enabled for %d.%d.%d", i.Version[0], i.Version[1], i.Version[2]) + + ip, family := ipAndFamily(address) + + // Generate out API Server LoadBalancer instance + svc := ipvs.Service{ + Family: family, + Protocol: ipvs.TCP, + Port: uint16(port), + Address: ip, + Scheduler: ROUNDROBIN, + } + + var m ipvs.ForwardType + switch strings.ToLower(forwardingMethod) { + case "masquerade": + m = ipvs.Masquerade + case "local": + m = ipvs.Local + case "tunnel": + m = ipvs.Tunnel + case "directroute": + m = ipvs.DirectRoute + case "bypass": + m = ipvs.Bypass + default: + m = ipvs.Local + log.Warnf("unknown forwarding method. Defaulting to Local") + } + + lb := &IPVSLoadBalancer{ + Port: port, + client: c, + loadBalancerService: svc, + forwardingMethod: m, + } + // Return our created load-balancer + return lb, nil +} + +func (lb *IPVSLoadBalancer) RemoveIPVSLB() error { + err := lb.client.RemoveService(lb.loadBalancerService) + if err != nil { + return fmt.Errorf("error removing existing IPVS service: %v", err) + } + return nil +} + +func (lb *IPVSLoadBalancer) AddBackend(address string, port int) error { + // Check if this is the first backend + backends, err := lb.client.Destinations(lb.loadBalancerService) + if err != nil && strings.Contains(err.Error(), "file does not exist") { + log.Errorf("Error querying backends %s", err) + } + // If this is our first backend, then we can create the load-balancer service and add a backend + if len(backends) == 0 { + err = lb.client.CreateService(lb.loadBalancerService) + // If we've an error it could be that the IPVS lb instance has been left from a previous leadership + if err != nil && strings.Contains(err.Error(), "file exists") { + log.Warnf("load balancer for API server already exists, attempting to remove and re-create") + err = lb.client.RemoveService(lb.loadBalancerService) + + if err != nil { + return fmt.Errorf("error re-creating IPVS service: %v", err) + } + err = lb.client.CreateService(lb.loadBalancerService) + if err != nil { + return fmt.Errorf("error re-creating IPVS service: %v", err) + } + } else if err != nil { + // Fatal error at this point as IPVS is probably not working + log.Errorf("Unable to create an IPVS service, ensure IPVS kernel modules are loaded") + log.Fatalf("IPVS service error: %v", err) + } + log.Infof("Created Load-Balancer services on [%s:%d]", lb.addrString(), lb.Port) + } + + ip, family := ipAndFamily(address) + + // Ignore backends that use a different address family. + // Looks like different families could be supported in tunnel mode... 
+ if family != lb.loadBalancerService.Family { + return nil + } + + dst := ipvs.Destination{ + Address: ip, + Port: uint16(port), + Family: family, + Weight: 1, + FwdMethod: lb.forwardingMethod, + } + + err = lb.client.CreateDestination(lb.loadBalancerService, dst) + // Swallow error of existing back end, the node watcher may attempt to apply + // the same back end multiple times + if err != nil { + if !strings.Contains(err.Error(), "file exists") { + return fmt.Errorf("error creating backend: %v", err) + } + // file exists is fine, we will just return at this point + return nil + } + log.Infof("Added backend for [%s:%d] on [%s:%d]", lb.addrString(), lb.Port, address, port) + + return nil +} + +func (lb *IPVSLoadBalancer) RemoveBackend(address string, port int) error { + ip, family := ipAndFamily(address) + if family != lb.loadBalancerService.Family { + return nil + } + + dst := ipvs.Destination{ + Address: ip, + Port: uint16(port), + Family: family, + Weight: 1, + } + err := lb.client.RemoveDestination(lb.loadBalancerService, dst) + if err != nil { + return fmt.Errorf("error removing backend: %v", err) + } + return nil +} + +func (lb *IPVSLoadBalancer) addrString() string { + return lb.loadBalancerService.Address.String() +} + +func ipAndFamily(address string) (netip.Addr, ipvs.AddressFamily) { + + ipAddr := net.ParseIP(address) + if ipAddr.To4() == nil { + return netip.AddrFrom16([16]byte(ipAddr.To16())), ipvs.INET6 + } + return netip.AddrFrom4([4]byte(ipAddr.To4())), ipvs.INET +} diff --git a/pkg/loadbalancer/ipvs_test.go b/pkg/loadbalancer/ipvs_test.go new file mode 100644 index 00000000..6271aa88 --- /dev/null +++ b/pkg/loadbalancer/ipvs_test.go @@ -0,0 +1,41 @@ +package loadbalancer + +import ( + "net/netip" + "reflect" + "testing" + + "github.com/cloudflare/ipvs" +) + +func Test_ipAndFamily(t *testing.T) { + type args struct { + address string + } + tests := []struct { + name string + args args + want netip.Addr + want1 ipvs.AddressFamily + }{ + { + name: "IPv4", + args: args{ + address: "192.168.0.20", + }, + want: netip.AddrFrom4([4]byte{192, 168, 0, 20}), + want1: ipvs.INET, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1 := ipAndFamily(tt.args.address) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ipAndFamily() got = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(got1, tt.want1) { + t.Errorf("ipAndFamily() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} diff --git a/pkg/loadbalancer/lb_connections.go b/pkg/loadbalancer/lb_connections.go deleted file mode 100644 index b7dd1c7a..00000000 --- a/pkg/loadbalancer/lb_connections.go +++ /dev/null @@ -1,129 +0,0 @@ -package loadbalancer - -import ( - "io" - "net" - "sync" - "time" - - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" -) - -// 1. Load balancer port is exposed -// 2. We listen -// 3. On connection we connect to an endpoint -// [loop] -// 4. We read from the load balancer port -// 5. We write traffic to the endpoint -// 6. We read response from endpoint -// 7. 
We write response to load balancer -// [goto loop] - -func persistentConnection(frontendConnection net.Conn, lb *kubevip.LoadBalancer) { - - var endpoint net.Conn - defer frontendConnection.Close() - // Makes sure we close the connections to the endpoint when we've completed - - // Set a timeout for connecting to an endpoint - dialer := net.Dialer{Timeout: time.Millisecond * 500} - for { - - // Connect to Endpoint - ep, err := lb.ReturnEndpointAddr() - if err != nil { - return - } - - // We now dial to an endpoint with a timeout of half a second - // TODO - make this adjustable - endpoint, err = dialer.Dial("tcp", ep) - if err != nil { - log.Debugf("%v", err) - log.Warnf("[%s]---X [FAILED] X-->[%s]", frontendConnection.RemoteAddr(), ep) - } else { - log.Debugf("[%s]---->[ACCEPT]---->[%s]", frontendConnection.RemoteAddr(), ep) - defer endpoint.Close() - break - } - } - - wg := &sync.WaitGroup{} - wg.Add(1) - - // Begin copying incoming (frontend -> to an endpoint) - go func() { - bytes, err := io.Copy(endpoint, frontendConnection) - log.Debugf("[%d] bytes of data sent to endpoint", bytes) - if err != nil { - log.Warnf("Error sending data to endpoint [%s] [%v]", endpoint.RemoteAddr(), err) - } - wg.Done() - }() - // go func() { - // Begin copying recieving (endpoint -> back to frontend) - bytes, err := io.Copy(frontendConnection, endpoint) - log.Debugf("[%d] bytes of data sent to client", bytes) - if err != nil { - log.Warnf("Error sending data to frontend [%s] [%s]", frontendConnection.RemoteAddr(), err) - } - // wg.Done() - // endpoint.Close() - // }() - wg.Wait() -} - -func persistentUDPConnection(frontendConnection net.Conn, lb *kubevip.LoadBalancer) { - - var endpoint net.Conn - defer frontendConnection.Close() - // Makes sure we close the connections to the endpoint when we've completed - - // Set a timeout for connecting to an endpoint - dialer := net.Dialer{Timeout: time.Millisecond * 500} - for { - - // Connect to Endpoint - ep, err := lb.ReturnEndpointAddr() - if err != nil { - return - } - - // We now dial to an endpoint with a timeout of half a second - // TODO - make this adjustable - endpoint, err = dialer.Dial("udp", ep) - if err != nil { - log.Debugf("%v", err) - log.Warnf("[%s]---X [FAILED] X-->[%s]", frontendConnection.RemoteAddr(), ep) - } else { - log.Debugf("[%s]---->[ACCEPT]---->[%s]", frontendConnection.RemoteAddr(), ep) - defer endpoint.Close() - break - } - } - - wg := &sync.WaitGroup{} - wg.Add(1) - - // Begin copying incoming (frontend -> to an endpoint) - go func() { - bytes, err := io.Copy(endpoint, frontendConnection) - log.Debugf("[%d] bytes of data sent to endpoint", bytes) - if err != nil { - log.Warnf("Error sending data to endpoint [%s] [%v]", endpoint.RemoteAddr(), err) - } - wg.Done() - }() - // go func() { - // Begin copying recieving (endpoint -> back to frontend) - bytes, err := io.Copy(frontendConnection, endpoint) - log.Debugf("[%d] bytes of data sent to client", bytes) - if err != nil { - log.Warnf("Error sending data to frontend [%s] [%s]", frontendConnection.RemoteAddr(), err) - } - // wg.Done() - // endpoint.Close() - // }() - wg.Wait() -} diff --git a/pkg/loadbalancer/lb_http.go b/pkg/loadbalancer/lb_http.go deleted file mode 100644 index 4a7b7387..00000000 --- a/pkg/loadbalancer/lb_http.go +++ /dev/null @@ -1,126 +0,0 @@ -package loadbalancer - -import ( - "context" - "fmt" - "net/http" - "net/http/httputil" - "net/url" - "time" - - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" -) - -func (lb *LBInstance) 
startHTTP(bindAddress string) error { - log.Infof("Starting TCP Load Balancer for service [%s]", lb.instance.Name) - - // Validate the back end URLS - err := kubevip.ValidateBackEndURLS(&lb.instance.Backends) - if err != nil { - return err - } - - frontEnd := fmt.Sprintf("%s:%d", bindAddress, lb.instance.Port) - - handler := func(w http.ResponseWriter, req *http.Request) { - // parse the url - url, _ := url.Parse(lb.instance.ReturnEndpointURL().String()) - - // create the reverse proxy - proxy := httputil.NewSingleHostReverseProxy(url) - - // Update the headers to allow for SSL redirection - req.URL.Host = url.Host - req.URL.Scheme = url.Scheme - req.Header.Set("X-Forwarded-Host", req.Host) - req.Host = url.Host - - //Print out the response (if debug logging) - if log.GetLevel() == log.DebugLevel { - fmt.Printf("Host:\t%s\n", req.Host) - fmt.Printf("Request:\t%s\n", req.Method) - fmt.Printf("URI:\t%s\n", req.RequestURI) - - for key, value := range req.Header { - fmt.Println("Header:", key, "Value:", value) - } - } - - // Note that ServeHttp is non blocking and uses a go routine under the hood - proxy.ServeHTTP(w, req) - } - - mux := http.NewServeMux() - mux.HandleFunc("/", handler) - log.Infof("Starting server listening [%s]", frontEnd) - - server := &http.Server{Addr: frontEnd, Handler: mux} - - go func() error { - if err := server.ListenAndServe(); err != nil { - return err - } - return nil - }() - - // If the stop channel is closed then the server will be gracefully shut down - <-lb.stop - log.Infof("Stopping the load balancer [%s] bound to [%s] with 5sec timeout", lb.instance.Name, frontEnd) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - if err := server.Shutdown(ctx); err != nil { - return err - } - close(lb.stopped) - return nil -} - -//StartHTTP - begins the HTTP load balancer -func StartHTTP(lb *kubevip.LoadBalancer, address string) error { - log.Infof("Starting TCP Load Balancer for service [%s]", lb.Name) - - // Validate the back end URLS - err := kubevip.ValidateBackEndURLS(&lb.Backends) - if err != nil { - return err - } - - frontEnd := fmt.Sprintf("%s:%d", address, lb.Port) - - handler := func(w http.ResponseWriter, req *http.Request) { - // parse the url - url, _ := url.Parse(lb.ReturnEndpointURL().String()) - - // create the reverse proxy - proxy := httputil.NewSingleHostReverseProxy(url) - - // Update the headers to allow for SSL redirection - req.URL.Host = url.Host - req.URL.Scheme = url.Scheme - req.Header.Set("X-Forwarded-Host", req.Host) - req.Host = url.Host - - //Print out the response (if debug logging) - if log.GetLevel() == log.DebugLevel { - fmt.Printf("Host:\t%s\n", req.Host) - fmt.Printf("Request:\t%s\n", req.Method) - fmt.Printf("URI:\t%s\n", req.RequestURI) - - for key, value := range req.Header { - fmt.Println("Header:", key, "Value:", value) - } - } - - // Note that ServeHttp is non blocking and uses a go routine under the hood - proxy.ServeHTTP(w, req) - } - - mux := http.NewServeMux() - mux.HandleFunc("/", handler) - log.Infof("Starting server listening [%s]", frontEnd) - http.ListenAndServe(frontEnd, mux) - // Should never get here - return nil -} diff --git a/pkg/loadbalancer/lb_tcp.go b/pkg/loadbalancer/lb_tcp.go deleted file mode 100644 index 8ce2cf61..00000000 --- a/pkg/loadbalancer/lb_tcp.go +++ /dev/null @@ -1,178 +0,0 @@ -package loadbalancer - -import ( - "bytes" - "fmt" - "io" - "net" - "time" - - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" -) - -// StartTCP a 
TCP load balancer server instane -func (lb *LBInstance) startTCP(bindAddress string) error { - fullAddress := fmt.Sprintf("%s:%d", bindAddress, lb.instance.Port) - log.Infof("Starting TCP Load Balancer for service [%s]", fullAddress) - - laddr, err := net.ResolveTCPAddr("tcp", fullAddress) - if nil != err { - log.Errorln(err) - } - l, err := net.ListenTCP("tcp", laddr) - if nil != err { - return fmt.Errorf("Unable to bind [%s]", err.Error()) - } - go func() { - for { - select { - - case <-lb.stop: - log.Debugln("Closing listener") - - // We've closed the stop channel - err = l.Close() - if err != nil { - return - } - // Close the stopped channel as the listener has been stopped - close(lb.stopped) - default: - - err = l.SetDeadline(time.Now().Add(200 * time.Millisecond)) - if err != nil { - log.Errorf("Error setting TCP deadline", err) - } - fd, err := l.Accept() - if err != nil { - // Check it it's an accept timeout - if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { - continue - } else if err != io.EOF { - log.Errorf("TCP Accept error [%s]", err) - } - } - go persistentConnection(fd, lb.instance) - } - } - }() - log.Infof("Load Balancer [%s] started", lb.instance.Name) - - return nil -} - -// startTCPDNU - Start TCP service Do not use -// This stops the service by closing the listener and then ignorning the error from Accept() -func (lb *LBInstance) startTCPDNU(bindAddress string) error { - fullAddress := fmt.Sprintf("%s:%d", bindAddress, lb.instance.Port) - log.Infof("Starting TCP Load Balancer for service [%s]", fullAddress) - - //l, err := net.Listen("tcp", fmt.Sprintf("%s:%d", bindAddress, lb.instance.Port)) - laddr, err := net.ResolveTCPAddr("tcp", fullAddress) - if nil != err { - log.Errorln(err) - } - l, err := net.ListenTCP("tcp", laddr) - if nil != err { - return fmt.Errorf("Unable to bind [%s]", err.Error()) - } - go func() { - <-lb.stop - log.Debugln("Closing listener") - - // We've closed the stop channel - err = l.Close() - if err != nil { - return - } - // Close the stopped channel as the listener has been stopped - close(lb.stopped) - - }() - - go func() { - for { - - fd, err := l.Accept() - if err != nil { - // Check it it's an accept timeout - if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { - continue - } else if err != io.EOF { - log.Errorf("TCP Accept error [%s]", err) - return - } - } - go persistentConnection(fd, lb.instance) - //go processRequests(lb.instance, fd) - } - // } - }() - log.Infof("Load Balancer [%s] started", lb.instance.Name) - - return nil -} - -// user -> [LB] -// [LB] (select end pot) -> [endpoint] -// -// -// -// -// -// - -func processRequests(lb *kubevip.LoadBalancer, frontendConnection net.Conn) { - for { - // READ FROM client - buf := make([]byte, 1024*1024) - datalen, err := frontendConnection.Read(buf) - if err != nil { - log.Fatalf("%v", err) - } - log.Debugf("Sent [%d] bytes to the LB", datalen) - data := buf[0:datalen] - - // Connect to Endpoint - ep, err := lb.ReturnEndpointAddr() - if err != nil { - log.Errorf("No Backends available") - } - log.Debugf("Attempting endpoint [%s]", ep) - - endpoint, err := net.Dial("tcp", ep) - if err != nil { - fmt.Println("dial error:", err) - // return nil, err - } - log.Debugf("succesfully connected to [%s]", ep) - - // Set a timeout - endpoint.SetReadDeadline(time.Now().Add(time.Second * 1)) - - b, err := endpointRequest(endpoint, ep, string(data)) - - _, err = frontendConnection.Write(b) - if err != nil { - log.Fatal("Write: ", err) - } - } -} - -// endpointRequest will take an 
endpoint address and send the data and wait for the response -func endpointRequest(endpoint net.Conn, endpointAddr, request string) ([]byte, error) { - - // defer conn.Close() - datalen, err := fmt.Fprintf(endpoint, request) - if err != nil { - fmt.Println("dial error:", err) - return nil, err - } - log.Debugf("Sent [%d] bytes to the endpoint", datalen) - - var b bytes.Buffer - io.Copy(&b, endpoint) - log.Debugf("Recieved [%d] from the endpoint", b.Len()) - return b.Bytes(), nil -} diff --git a/pkg/loadbalancer/lb_udp.go b/pkg/loadbalancer/lb_udp.go deleted file mode 100644 index 378df896..00000000 --- a/pkg/loadbalancer/lb_udp.go +++ /dev/null @@ -1,103 +0,0 @@ -package loadbalancer - -import ( - "fmt" - "net" - - log "github.com/sirupsen/logrus" -) - -// StartTCP a TCP load balancer server instane -func (lb *LBInstance) startUDP(bindAddress string) error { - fullAddress := fmt.Sprintf("%s:%d", bindAddress, lb.instance.Port) - log.Infof("Starting UDP Load Balancer for service [%s]", fullAddress) - - laddr, err := net.ResolveUDPAddr("udp", fullAddress) - if nil != err { - log.Errorln(err) - } - l, err := net.ListenUDP("udp", laddr) - if nil != err { - return fmt.Errorf("Unable to bind [%s]", err.Error()) - } - //stopListener := make(chan bool) - - go func() { - for { - select { - - case <-lb.stop: - log.Debugln("Closing listener") - - // We've closed the stop channel - err = l.Close() - if err != nil { - return - } - // Close the stopped channel as the listener has been stopped - close(lb.stopped) - default: - - // err = l.SetDeadline(time.Now().Add(200 * time.Millisecond)) - // if err != nil { - // log.Errorf("Error setting TCP deadline", err) - // } - // fd, err := l.Accept() - // if err != nil { - // // Check it it's an accept timeout - // if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { - // continue - // } else if err != io.EOF { - // log.Errorf("TCP Accept error [%s]", err) - // } - // } - go persistentUDPConnection(l, lb.instance) - } - } - }() - log.Infof("Load Balancer [%s] started", lb.instance.Name) - - // go func() { - // <-lb.stop - // log.Debugln("Closing listener") - - // // We've closed the stop channel - // err = l.Close() - // if err != nil { - // return - // } - // // Close the stopped channel as the listener has been stopped - // close(stopListener) - // }() - - // // Create a listener per CPU - // // TODO - make this customisable? - - // //for i := 0; i < runtime.NumCPU(); i++ { - // go persistentUDPConnection(l, lb.instance) - // //} - - //<-stopListener // hang until an error - - log.Infof("Load Balancer [%s] started", lb.instance.Name) - - return nil -} - -// func listen(connection *net.UDPConn, quit chan bool) { -// buffer := make([]byte, 1024) -// n, remoteAddr, err := 0, new(net.UDPAddr), error(nil) -// for err == nil { - -// //TODO - - -// n, remoteAddr, err = connection.ReadFromUDP(buffer) -// // you might copy out the contents of the packet here, to -// // `var r myapp.Request`, say, and `go handleRequest(r)` (or -// // send it down a channel) to free up the listening -// // goroutine. you do *need* to copy then, though, -// // because you've only made one buffer per listen(). 
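The commented-out listen helper above points at a real pitfall: ReadFromUDP reuses the same buffer on every iteration, so the payload must be copied before it is handed to another goroutine. A minimal standard-library sketch of that copy-then-dispatch pattern (the listen address and the handlePacket helper are placeholders, not part of this codebase):

package main

import (
	"log"
	"net"
)

// handlePacket is a stand-in for whatever per-packet work is needed.
func handlePacket(remote *net.UDPAddr, data []byte) {
	log.Printf("from %s - %d bytes", remote, len(data))
}

func listenUDP(addr string) error {
	laddr, err := net.ResolveUDPAddr("udp", addr)
	if err != nil {
		return err
	}
	conn, err := net.ListenUDP("udp", laddr)
	if err != nil {
		return err
	}
	defer conn.Close()

	buf := make([]byte, 1024)
	for {
		n, remote, err := conn.ReadFromUDP(buf)
		if err != nil {
			return err
		}
		// The read buffer is reused on the next iteration, so copy the
		// payload before dispatching it to another goroutine.
		data := make([]byte, n)
		copy(data, buf[:n])
		go handlePacket(remote, data)
	}
}

func main() {
	if err := listenUDP("0.0.0.0:9999"); err != nil {
		log.Fatal(err)
	}
}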
-// fmt.Println("from", remoteAddr, "-", buffer[:n]) -// } -// fmt.Println("listener failed - ", err) -// } diff --git a/pkg/loadbalancer/manager.go b/pkg/loadbalancer/manager.go deleted file mode 100644 index 7c191320..00000000 --- a/pkg/loadbalancer/manager.go +++ /dev/null @@ -1,78 +0,0 @@ -package loadbalancer - -import ( - "fmt" - "strings" - - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" -) - -//LBInstance - manages the state of load balancer instances -type LBInstance struct { - stop chan bool // Asks LB to stop - stopped chan bool // LB is stopped - instance *kubevip.LoadBalancer // pointer to a LB instance - // mux sync.Mutex -} - -//LBManager - will manage a number of load blancer instances -type LBManager struct { - loadBalancer []LBInstance -} - -//Add - handles the building of the load balancers -func (lm *LBManager) Add(bindAddress string, lb *kubevip.LoadBalancer) error { - newLB := LBInstance{ - stop: make(chan bool, 1), - stopped: make(chan bool, 1), - instance: lb, - } - - switch strings.ToLower(lb.Type) { - case "tcp": - err := newLB.startTCP(bindAddress) - if err != nil { - return err - } - case "udp": - err := newLB.startUDP(bindAddress) - if err != nil { - return err - } - case "http": - err := newLB.startHTTP(bindAddress) - if err != nil { - return err - } - default: - return fmt.Errorf("Unknown Load Balancer type [%s]", lb.Type) - } - - lm.loadBalancer = append(lm.loadBalancer, newLB) - return nil -} - -//StopAll - handles the building of the load balancers -func (lm *LBManager) StopAll() error { - log.Debugf("Stopping [%d] loadbalancer instances", len(lm.loadBalancer)) - for x := range lm.loadBalancer { - err := lm.loadBalancer[x].Stop() - if err != nil { - return err - } - } - // Reset the loadbalancer entries - lm.loadBalancer = nil - return nil -} - -//Stop - handles the building of the load balancers -func (l *LBInstance) Stop() error { - - close(l.stop) - - <-l.stopped - log.Infof("Load Balancer instance [%s] has stopped", l.instance.Name) - return nil -} diff --git a/pkg/manager/cluster.go b/pkg/manager/cluster.go new file mode 100644 index 00000000..da2222de --- /dev/null +++ b/pkg/manager/cluster.go @@ -0,0 +1,29 @@ +package manager + +import ( + "github.com/pkg/errors" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/etcd" +) + +func initClusterManager(sm *Manager) (*cluster.Manager, error) { + m := &cluster.Manager{ + SignalChan: sm.signalChan, + } + + switch sm.config.LeaderElectionType { + case "kubernetes", "": + m.KubernetesClient = sm.clientSet + case "etcd": + client, err := etcd.NewClient(sm.config) + if err != nil { + return nil, err + } + m.EtcdClient = client + default: + return nil, errors.Errorf("invalid LeaderElectionMode %s not supported", sm.config.LeaderElectionType) + } + + return m, nil +} diff --git a/pkg/manager/instance.go b/pkg/manager/instance.go new file mode 100644 index 00000000..41422a1f --- /dev/null +++ b/pkg/manager/instance.go @@ -0,0 +1,223 @@ +package manager + +import ( + "fmt" + "net" + + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + v1 "k8s.io/api/core/v1" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/kube-vip/kube-vip/pkg/vip" +) + +// Instance defines an instance of everything needed to manage vips +type Instance struct { + // Virtual IP / Load Balancer configuration + vipConfigs []*kubevip.Config + + // cluster instances + clusters []*cluster.Cluster + + // Service 
uses DHCP + isDHCP bool + dhcpInterface string + dhcpInterfaceHwaddr string + dhcpInterfaceIP string + dhcpHostname string + dhcpClient *vip.DHCPClient + + // Kubernetes service mapping + VIPs []string + Port int32 + UID string + Type string + + serviceSnapshot *v1.Service +} + +func NewInstance(svc *v1.Service, config *kubevip.Config) (*Instance, error) { + instanceAddresses := fetchServiceAddresses(svc) + instanceUID := string(svc.UID) + + // Detect if we're using a specific interface for services + var serviceInterface string + if config.ServicesInterface != "" { + serviceInterface = config.ServicesInterface + } else { + serviceInterface = config.Interface + } + + var newVips []*kubevip.Config + + for _, address := range instanceAddresses { + // Generate new Virtual IP configuration + newVips = append(newVips, &kubevip.Config{ + VIP: address, + Interface: serviceInterface, + SingleNode: true, + EnableARP: config.EnableARP, + EnableBGP: config.EnableBGP, + VIPCIDR: config.VIPCIDR, + VIPSubnet: config.VIPSubnet, + EnableRoutingTable: config.EnableRoutingTable, + RoutingTableID: config.RoutingTableID, + RoutingTableType: config.RoutingTableType, + ArpBroadcastRate: config.ArpBroadcastRate, + EnableServiceSecurity: config.EnableServiceSecurity, + DNSMode: config.DNSMode, + DisableServiceUpdates: config.DisableServiceUpdates, + EnableServicesElection: config.EnableServicesElection, + KubernetesLeaderElection: kubevip.KubernetesLeaderElection{ + EnableLeaderElection: config.EnableLeaderElection, + }, + }) + } + + // Create new service + instance := &Instance{ + UID: instanceUID, + VIPs: instanceAddresses, + serviceSnapshot: svc, + } + if len(svc.Spec.Ports) > 0 { + instance.Type = string(svc.Spec.Ports[0].Protocol) + instance.Port = svc.Spec.Ports[0].Port + } + + if svc.Annotations != nil { + instance.dhcpInterfaceHwaddr = svc.Annotations[hwAddrKey] + instance.dhcpInterfaceIP = svc.Annotations[requestedIP] + instance.dhcpHostname = svc.Annotations[loadbalancerHostname] + } + + // Generate Load Balancer config + newLB := kubevip.LoadBalancer{ + Name: fmt.Sprintf("%s-load-balancer", svc.Name), + Port: int(instance.Port), + Type: instance.Type, + BindToVip: true, + } + for _, vip := range newVips { + // Add Load Balancer Configuration + vip.LoadBalancers = append(vip.LoadBalancers, newLB) + } + // Create Add configuration to the new service + instance.vipConfigs = newVips + + // If this was purposely created with the address 0.0.0.0, + // we will create a macvlan on the main interface and a DHCP client + // TODO: Consider how best to handle DHCP with multiple addresses + if len(instanceAddresses) == 1 && instanceAddresses[0] == "0.0.0.0" { + err := instance.startDHCP() + if err != nil { + return nil, err + } + select { + case err := <-instance.dhcpClient.ErrorChannel(): + return nil, fmt.Errorf("error starting DHCP for %s/%s: error: %s", + instance.serviceSnapshot.Namespace, instance.serviceSnapshot.Name, err) + case ip := <-instance.dhcpClient.IPChannel(): + instance.vipConfigs[0].VIP = ip + instance.dhcpInterfaceIP = ip + } + } + for _, vipConfig := range instance.vipConfigs { + c, err := cluster.InitCluster(vipConfig, false) + if err != nil { + log.Errorf("Failed to add Service %s/%s", svc.Namespace, svc.Name) + return nil, err + } + + for i := range c.Network { + c.Network[i].SetServicePorts(svc) + } + + instance.clusters = append(instance.clusters, c) + } + + return instance, nil +} + +func (i *Instance) startDHCP() error { + if len(i.vipConfigs) != 1 { + return fmt.Errorf("DHCP requires 
exactly 1 VIP config, got: %v", len(i.vipConfigs)) + } + parent, err := netlink.LinkByName(i.vipConfigs[0].Interface) + if err != nil { + return fmt.Errorf("error finding VIP Interface, for building DHCP Link : %v", err) + } + + // Generate name from UID + interfaceName := fmt.Sprintf("vip-%s", i.UID[0:8]) + + // Check if the interface doesn't exist first + iface, err := net.InterfaceByName(interfaceName) + if err != nil { + log.Infof("Creating new macvlan interface for DHCP [%s]", interfaceName) + + hwaddr, err := net.ParseMAC(i.dhcpInterfaceHwaddr) + if i.dhcpInterfaceHwaddr != "" && err != nil { + return err + } else if hwaddr == nil { + hwaddr, err = net.ParseMAC(vip.GenerateMac()) + if err != nil { + return err + } + } + + log.Infof("New interface [%s] mac is %s", interfaceName, hwaddr) + mac := &netlink.Macvlan{ + LinkAttrs: netlink.LinkAttrs{ + Name: interfaceName, + ParentIndex: parent.Attrs().Index, + HardwareAddr: hwaddr, + }, + Mode: netlink.MACVLAN_MODE_DEFAULT, + } + + err = netlink.LinkAdd(mac) + if err != nil { + return fmt.Errorf("could not add %s: %v", interfaceName, err) + } + + err = netlink.LinkSetUp(mac) + if err != nil { + return fmt.Errorf("could not bring up interface [%s] : %v", interfaceName, err) + } + + iface, err = net.InterfaceByName(interfaceName) + if err != nil { + return fmt.Errorf("error finding new DHCP interface by name [%v]", err) + } + } else { + log.Infof("Using existing macvlan interface for DHCP [%s]", interfaceName) + } + + var initRebootFlag bool + if i.dhcpInterfaceIP != "" { + initRebootFlag = true + } + + client := vip.NewDHCPClient(iface, initRebootFlag, i.dhcpInterfaceIP) + + // Add hostname to dhcp client if annotated + if i.dhcpHostname != "" { + log.Infof("Hostname specified for dhcp lease: [%s] - [%s]", interfaceName, i.dhcpHostname) + client.WithHostName(i.dhcpHostname) + } + + go client.Start() + + // Set that DHCP is enabled + i.isDHCP = true + // Set the name of the interface so that it can be removed on Service deletion + i.dhcpInterface = interfaceName + i.dhcpInterfaceHwaddr = iface.HardwareAddr.String() + // Add the client so that we can call it to stop function + i.dhcpClient = client + + return nil +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go new file mode 100644 index 00000000..bf266c79 --- /dev/null +++ b/pkg/manager/manager.go @@ -0,0 +1,231 @@ +package manager + +import ( + "fmt" + "os" + "os/signal" + "path/filepath" + "strings" + "sync" + "syscall" + + "github.com/kube-vip/kube-vip/pkg/k8s" + + "github.com/kamhlos/upnp" + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/kubevip" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" +) + +const plunderLock = "plndr-svcs-lock" + +// Manager degines the manager of the load-balancing services +type Manager struct { + clientSet *kubernetes.Clientset + configMap string + config *kubevip.Config + + // Manager services + // service bool + + // Keeps track of all running instances + serviceInstances []*Instance + + // Additional functionality + upnp *upnp.Upnp + + //BGP Manager, this is a singleton that manages all BGP advertisements + bgpServer *bgp.Server + + // This channel is used to catch an OS signal and trigger a shutdown + signalChan chan os.Signal + + // This channel is used to signal a shutdown + shutdownChan chan struct{} + + // This is a prometheus counter used to count the number of events received + // from the service watcher + 
countServiceWatchEvent *prometheus.CounterVec + + // This is a prometheus gauge indicating the state of the sessions. + // 1 means "ESTABLISHED", 0 means "NOT ESTABLISHED" + bgpSessionInfoGauge *prometheus.GaugeVec + + // This mutex is to protect calls from various goroutines + mutex sync.Mutex +} + +// New will create a new managing object +func New(configMap string, config *kubevip.Config) (*Manager, error) { + + var clientset *kubernetes.Clientset + var err error + + adminConfigPath := "/etc/kubernetes/admin.conf" + homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") + + switch { + case config.LeaderElectionType == "etcd": + // Do nothing, we don't construct a k8s client for etcd leader election + case fileExists(adminConfigPath): + if config.EnableControlPlane { + // If this is a control plane host it will likely have started as a static pod or won't have the + // VIP up before trying to connect to the API server, we set the API endpoint to this machine to + // ensure connectivity. + if config.DetectControlPlane { + clientset, err = k8s.FindWorkingKubernetesAddress(adminConfigPath, false) + } else { + // This will attempt to use kubernetes as the hostname (this should be passed as a host alias) in the pod manifest + clientset, err = k8s.NewClientset(adminConfigPath, false, fmt.Sprintf("kubernetes:%v", config.Port)) + } + } else { + clientset, err = k8s.NewClientset(adminConfigPath, false, "") + } + if err != nil { + return nil, fmt.Errorf("could not create k8s clientset from external file: %q: %v", adminConfigPath, err) + } + log.Debugf("Using external Kubernetes configuration from file [%s]", adminConfigPath) + case fileExists(homeConfigPath): + clientset, err = k8s.NewClientset(homeConfigPath, false, "") + if err != nil { + return nil, fmt.Errorf("could not create k8s clientset from external file: %q: %v", homeConfigPath, err) + } + log.Debugf("Using external Kubernetes configuration from file [%s]", homeConfigPath) + default: + clientset, err = k8s.NewClientset("", true, "") + if err != nil { + return nil, fmt.Errorf("could not create k8s clientset from incluster config: %v", err) + } + log.Debug("Using external Kubernetes configuration from incluster config.") + } + + // Flip this to something else + // if config.DetectControlPlane { + // log.Info("[k8s client] flipping to internal service account") + // _, err = clientset.CoreV1().ServiceAccounts("kube-system").Apply(context.TODO(), kubevip.GenerateSA(), v1.ApplyOptions{FieldManager: "application/apply-patch"}) + // if err != nil { + // return nil, fmt.Errorf("could not create k8s clientset from incluster config: %v", err) + // } + // _, err = clientset.RbacV1().ClusterRoles().Apply(context.TODO(), kubevip.GenerateCR(), v1.ApplyOptions{FieldManager: "application/apply-patch"}) + // if err != nil { + // return nil, fmt.Errorf("could not create k8s clientset from incluster config: %v", err) + // } + // _, err = clientset.RbacV1().ClusterRoleBindings().Apply(context.TODO(), kubevip.GenerateCRB(), v1.ApplyOptions{FieldManager: "application/apply-patch"}) + // if err != nil { + // return nil, fmt.Errorf("could not create k8s clientset from incluster config: %v", err) + // } + // } + + return &Manager{ + clientSet: clientset, + configMap: configMap, + config: config, + countServiceWatchEvent: prometheus.NewCounterVec(prometheus.CounterOpts{ + Namespace: "kube_vip", + Subsystem: "manager", + Name: "all_services_events", + Help: "Count all events fired by the service watcher categorised by event type", + }, 
[]string{"type"}), + bgpSessionInfoGauge: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "kube_vip", + Subsystem: "manager", + Name: "bgp_session_info", + Help: "Display state of session by setting metric for label value with current state to 1", + }, []string{"state", "peer"}), + }, nil +} + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) Start() error { + + // listen for interrupts or the Linux SIGTERM signal and cancel + // our context, which the leader election code will observe and + // step down + sm.signalChan = make(chan os.Signal, 1) + // Add Notification for Userland interrupt + signal.Notify(sm.signalChan, syscall.SIGINT) + + // Add Notification for SIGTERM (sent from Kubernetes) + signal.Notify(sm.signalChan, syscall.SIGTERM) + + // All watchers and other goroutines should have an additional goroutine that blocks on this, to shut things down + sm.shutdownChan = make(chan struct{}) + + // If BGP is enabled then we start a server instance that will broadcast VIPs + if sm.config.EnableBGP { + + // If Annotations have been set then we will look them up + err := sm.parseAnnotations() + if err != nil { + return err + } + + log.Infoln("Starting Kube-vip Manager with the BGP engine") + return sm.startBGP() + } + + // If ARP is enabled then we start a LeaderElection that will use ARP to advertise VIPs + if sm.config.EnableARP { + log.Infoln("Starting Kube-vip Manager with the ARP engine") + return sm.startARP() + } + + if sm.config.EnableWireguard { + log.Infoln("Starting Kube-vip Manager with the Wireguard engine") + return sm.startWireguard() + } + + if sm.config.EnableRoutingTable { + log.Infoln("Starting Kube-vip Manager with the Routing Table engine") + return sm.startTableMode() + } + + log.Errorln("prematurely exiting Load-balancer as no modes [ARP/BGP/Wireguard] are enabled") + return nil +} + +func returnNameSpace() (string, error) { + if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { + if ns := strings.TrimSpace(string(data)); len(ns) > 0 { + return ns, nil + } + return "", err + } + return "", fmt.Errorf("unable to find Namespace") +} + +func (sm *Manager) parseAnnotations() error { + if sm.config.Annotations == "" { + log.Debugf("No Node annotations to parse") + return nil + } + + err := sm.annotationsWatcher() + if err != nil { + return err + } + return nil +} + +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if os.IsNotExist(err) { + return false + } + return !info.IsDir() +} + +func (sm *Manager) findServiceInstance(svc *v1.Service) *Instance { + svcUID := string(svc.UID) + log.Debugf("service UID: %s", svcUID) + for i := range sm.serviceInstances { + log.Debugf("saved service instance %d UID: %s", i, sm.serviceInstances[i].UID) + if sm.serviceInstances[i].UID == svcUID { + return sm.serviceInstances[i] + } + } + return nil +} diff --git a/pkg/manager/manager_arp.go b/pkg/manager/manager_arp.go new file mode 100644 index 00000000..4a963a03 --- /dev/null +++ b/pkg/manager/manager_arp.go @@ -0,0 +1,189 @@ +package manager + +import ( + "context" + "os" + "strconv" + "syscall" + "time" + + "github.com/kamhlos/upnp" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" + + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/iptables" + "github.com/kube-vip/kube-vip/pkg/vip" +) + +// 
Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startARP() error { + var cpCluster *cluster.Cluster + var ns string + var err error + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received kube-vip termination, signaling shutdown") + if sm.config.EnableControlPlane { + cpCluster.Stop() + } + // Close all go routines + close(sm.shutdownChan) + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + if sm.config.EnableControlPlane { + cpCluster, err = cluster.InitCluster(sm.config, false) + if err != nil { + return err + } + + clusterManager, err := initClusterManager(sm) + if err != nil { + return err + } + + go func() { + err := cpCluster.StartCluster(sm.config, clusterManager, nil) + if err != nil { + log.Errorf("Control Plane Error [%v]", err) + // Trigger the shutdown of this manager instance + sm.signalChan <- syscall.SIGINT + + } + }() + + // Check if we're also starting the services, if not we can sit and wait on the closing channel and return here + if !sm.config.EnableServices { + <-sm.signalChan + log.Infof("Shutting down Kube-Vip") + + return nil + } + + ns = sm.config.Namespace + } else { + + ns, err = returnNameSpace() + if err != nil { + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace + } + } + + id, err := os.Hostname() + if err != nil { + return err + } + + // Before starting the leader Election enable any additional functionality + upnpEnabled, _ := strconv.ParseBool(os.Getenv("enableUPNP")) + + if upnpEnabled { + sm.upnp = new(upnp.Upnp) + err := sm.upnp.ExternalIPAddr() + if err != nil { + log.Errorf("Error Enabling UPNP %s", err.Error()) + // Set the struct to nil so nothing should use it in future + sm.upnp = nil + } else { + log.Infof("Successfully enabled UPNP, Gateway address [%s]", sm.upnp.GatewayOutsideIP) + } + } + + // This will tidy any dangling kube-vip iptables rules + if os.Getenv("EGRESS_CLEAN") != "" { + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, sm.config.ServiceNamespace, iptables.ProtocolIPv4) + if err != nil { + log.Warnf("(egress) Unable to clean any dangling egress rules [%v]", err) + log.Warn("(egress) Can be ignored in non iptables release of kube-vip") + } else { + log.Info("(egress) Cleaning any dangling kube-vip egress rules") + cleanErr := i.CleanIPtables() + if cleanErr != nil { + log.Errorf("Error cleaning rules [%v]", cleanErr) + } + } + } + + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, sm.config.ServicesLeaseName, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". 
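The shutdown goroutine earlier in startARP traps SIGINT/SIGTERM, stops the control-plane cluster, closes the shutdown channel and cancels the context so leader election steps down. A condensed, standard-library-only sketch of that shape (the names and the one-hour fallback are illustrative only):

package main

import (
	"context"
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

	shutdownChan := make(chan struct{})

	go func() {
		<-signalChan
		log.Println("received termination, signaling shutdown")
		close(shutdownChan) // tells watchers and helpers to stop
		cancel()            // leader election observes this and steps down
	}()

	// Stand-in for the leader-election / watcher loop.
	select {
	case <-ctx.Done():
		log.Println("context cancelled, leader election would step down here")
	case <-time.After(time.Hour):
		log.Println("timed out waiting for a signal")
	}
}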
+ lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: sm.config.ServicesLeaseName, + Namespace: ns, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. + ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for _, instance := range sm.serviceInstances { + for _, cluster := range instance.clusters { + cluster.Stop() + } + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if sm.config.EnableNodeLabeling { + applyNodeLabel(sm.clientSet, sm.config.Address, id, identity) + } + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } + return nil +} diff --git a/pkg/manager/manager_bgp.go b/pkg/manager/manager_bgp.go new file mode 100644 index 00000000..edc2d8db --- /dev/null +++ b/pkg/manager/manager_bgp.go @@ -0,0 +1,141 @@ +package manager + +import ( + "context" + "fmt" + "os" + "syscall" + + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/kube-vip/kube-vip/pkg/cluster" + "github.com/kube-vip/kube-vip/pkg/equinixmetal" + api "github.com/osrg/gobgp/v3/api" + "github.com/packethost/packngo" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" +) + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startBGP() error { + var cpCluster *cluster.Cluster + // var ns string + var err error + + // If Equinix Metal is enabled then we can begin our preparation work + var packetClient *packngo.Client + if sm.config.EnableMetal { + if sm.config.ProviderConfig != "" { + key, project, err := equinixmetal.GetPacketConfig(sm.config.ProviderConfig) + if err != nil { + log.Error(err) + } else { + // Set the environment variable with the key for the project + os.Setenv("PACKET_AUTH_TOKEN", key) + // Update the configuration with the project key + sm.config.MetalProjectID = project + } + } + packetClient, err = packngo.NewClient() + if err != nil { + log.Error(err) + } + + // We're using Equinix Metal with BGP, populate the Peer information from the API + if sm.config.EnableBGP { + log.Infoln("Looking up the BGP configuration from Equinix Metal") + err = equinixmetal.BGPLookup(packetClient, sm.config) + if err != nil { + log.Error(err) + } + } + } + + log.Info("Starting the BGP server to advertise VIP routes to BGP peers") + sm.bgpServer, err = bgp.NewBGPServer(&sm.config.BGPConfig, func(p *api.WatchEventResponse_PeerEvent) { + ipaddr := p.GetPeer().GetState().GetNeighborAddress() + 
port := uint64(179) + peerDescription := fmt.Sprintf("%s:%d", ipaddr, port) + + for stateName, stateValue := range api.PeerState_SessionState_value { + metricValue := 0.0 + if stateValue == int32(p.GetPeer().GetState().GetSessionState().Number()) { + metricValue = 1 + } + + sm.bgpSessionInfoGauge.With(prometheus.Labels{ + "state": stateName, + "peer": peerDescription, + }).Set(metricValue) + } + }) + if err != nil { + return err + } + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Defer a function to check if the bgpServer has been created and if so attempt to close it + defer func() { + if sm.bgpServer != nil { + sm.bgpServer.Close() + } + }() + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received termination, signaling shutdown") + if sm.config.EnableControlPlane { + if cpCluster != nil { + cpCluster.Stop() + } + } + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + if sm.config.EnableControlPlane { + cpCluster, err = cluster.InitCluster(sm.config, false) + if err != nil { + return err + } + + clusterManager, err := initClusterManager(sm) + if err != nil { + return err + } + + go func() { + if sm.config.EnableLeaderElection { + err = cpCluster.StartCluster(sm.config, clusterManager, sm.bgpServer) + } else { + err = cpCluster.StartVipService(sm.config, clusterManager, sm.bgpServer, packetClient) + } + if err != nil { + log.Errorf("Control Plane Error [%v]", err) + // Trigger the shutdown of this manager instance + sm.signalChan <- syscall.SIGINT + } + }() + + // Check if we're also starting the services, if not we can sit and wait on the closing channel and return here + if !sm.config.EnableServices { + <-sm.signalChan + log.Infof("Shutting down Kube-Vip") + + return nil + } + } + + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + return err + } + + log.Infof("Shutting down Kube-Vip") + + return nil +} diff --git a/pkg/manager/manager_table.go b/pkg/manager/manager_table.go new file mode 100644 index 00000000..8527b53f --- /dev/null +++ b/pkg/manager/manager_table.go @@ -0,0 +1,117 @@ +package manager + +import ( + "context" + "os" + "time" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startTableMode() error { + var ns string + var err error + + id, err := os.Hostname() + if err != nil { + return err + } + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log.Infof("all routing table entries will exist in table [%d]", sm.config.RoutingTableID) + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received termination, signaling shutdown") + + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + ns, err = returnNameSpace() + if err != nil { + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace + } + + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon 
that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else if sm.config.EnableLeaderElection { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: plunderLock, + Namespace: ns, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. + ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for _, instance := range sm.serviceInstances { + for _, cluster := range instance.clusters { + cluster.Stop() + } + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } else { + log.Infof("beginning watching services without leader election") + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Errorf("Cannot watch services, %v", err) + } + } + return nil +} diff --git a/pkg/manager/manager_wireguard.go b/pkg/manager/manager_wireguard.go new file mode 100644 index 00000000..0f84b957 --- /dev/null +++ b/pkg/manager/manager_wireguard.go @@ -0,0 +1,144 @@ +package manager + +import ( + "context" + "os" + "strconv" + "time" + + "github.com/kamhlos/upnp" + "github.com/kube-vip/kube-vip/pkg/wireguard" + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// Start will begin the Manager, which will start services and watch the configmap +func (sm *Manager) startWireguard() error { + var ns string + var err error + + id, err := os.Hostname() + if err != nil { + return err + } + + // use a Go context so we can tell the leaderelection code when we + // want to step down + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + log.Infoln("reading wireguard peer configuration from Kubernetes secret") + s, err := sm.clientSet.CoreV1().Secrets(sm.config.Namespace).Get(ctx, "wireguard", metav1.GetOptions{}) + if err != nil { + return err + } + // parse all the details needed for Wireguard + peerPublicKey := 
s.Data["peerPublicKey"] + peerEndpoint := s.Data["peerEndpoint"] + privateKey := s.Data["privateKey"] + + // Configure the interface to join the Wireguard VPN + err = wireguard.ConfigureInterface(string(privateKey), string(peerPublicKey), string(peerEndpoint)) + if err != nil { + return err + } + + // Shutdown function that will wait on this signal, unless we call it ourselves + go func() { + <-sm.signalChan + log.Info("Received termination, signaling shutdown") + + // Cancel the context, which will in turn cancel the leadership + cancel() + }() + + ns, err = returnNameSpace() + if err != nil { + log.Warnf("unable to auto-detect namespace, dropping to [%s]", sm.config.Namespace) + ns = sm.config.Namespace + } + + // Before starting the leader Election enable any additional functionality + upnpEnabled, _ := strconv.ParseBool(os.Getenv("enableUPNP")) + + if upnpEnabled { + sm.upnp = new(upnp.Upnp) + err := sm.upnp.ExternalIPAddr() + if err != nil { + log.Errorf("Error Enabling UPNP %s", err.Error()) + // Set the struct to nil so nothing should use it in future + sm.upnp = nil + } else { + log.Infof("Successfully enabled UPNP, Gateway address [%s]", sm.upnp.GatewayOutsideIP) + } + } + + // Start a services watcher (all kube-vip pods will watch services), upon a new service + // a lock based upon that service is created that they will all leaderElection on + if sm.config.EnableServicesElection { + log.Infof("beginning watching services, leaderelection will happen for every service") + err = sm.startServicesWatchForLeaderElection(ctx) + if err != nil { + return err + } + } else { + + log.Infof("beginning services leadership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: plunderLock, + Namespace: ns, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. 
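startWireguard above expects a Secret named wireguard containing privateKey, peerPublicKey and peerEndpoint keys in the kube-vip namespace. As a rough client-go sketch of provisioning that Secret (the kube-system namespace, kubeconfig path and all values are assumptions, not taken from this code):

package main

import (
	"context"
	"log"
	"os"
	"path/filepath"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	kubeconfig := filepath.Join(os.Getenv("HOME"), ".kube", "config")
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Keys match the ones read by startWireguard; the values are placeholders.
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "wireguard",
			Namespace: "kube-system",
		},
		StringData: map[string]string{
			"privateKey":    "<wireguard-private-key>",
			"peerPublicKey": "<peer-public-key>",
			"peerEndpoint":  "192.168.0.1:51820",
		},
	}

	if _, err := clientset.CoreV1().Secrets("kube-system").Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Println("created wireguard secret")
}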
+ ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + err = sm.servicesWatcher(ctx, sm.syncServices) + if err != nil { + log.Error(err) + } + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("leader lost: %s", id) + for _, instance := range sm.serviceInstances { + for _, cluster := range instance.clusters { + cluster.Stop() + } + } + + log.Fatal("lost leadership, restarting kube-vip") + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("new leader elected: %s", identity) + }, + }, + }) + } + return nil +} diff --git a/pkg/manager/node_labeling.go b/pkg/manager/node_labeling.go new file mode 100644 index 00000000..610cdfc0 --- /dev/null +++ b/pkg/manager/node_labeling.go @@ -0,0 +1,73 @@ +package manager + +import ( + "context" + "encoding/json" + "fmt" + + log "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" +) + +const ( + nodeLabelIndex = "kube-vip.io/has-ip" + nodeLabelJSONPath = `kube-vip.io~1has-ip` +) + +type patchStringLabel struct { + Op string `json:"op"` + Path string `json:"path"` + Value string `json:"value"` +} + +// applyNodeLabel add/remove node label `kube-vip.io/has-ip=` to/from +// the node where the virtual IP was added to/removed from. +func applyNodeLabel(clientSet *kubernetes.Clientset, address, id, identity string) { + ctx := context.Background() + node, err := clientSet.CoreV1().Nodes().Get(ctx, id, metav1.GetOptions{}) + if err != nil { + log.Errorf("can't query node %s labels. error: %v", id, err) + return + } + + log.Debugf("node %s labels: %+v", id, node.Labels) + + value, ok := node.Labels[nodeLabelIndex] + path := fmt.Sprintf("/metadata/labels/%s", nodeLabelJSONPath) + if (!ok || value != address) && id == identity { + log.Debugf("setting node label `has-ip=%s` on %s", address, id) + // Append label + applyPatchLabels(ctx, clientSet, id, "add", path, address) + } else if ok && value == address { + log.Debugf("removing node label `has-ip=%s` on %s", address, id) + // Remove label + applyPatchLabels(ctx, clientSet, id, "remove", path, address) + } else { + log.Debugf("no node label change needed") + } +} + +// applyPatchLabels add/remove node labels +func applyPatchLabels(ctx context.Context, clientSet *kubernetes.Clientset, + name, operation, path, value string) { + patchLabels := []patchStringLabel{{ + Op: operation, + Path: path, + Value: value, + }} + patchData, err := json.Marshal(patchLabels) + if err != nil { + log.Errorf("node patch marshaling failed. error: %v", err) + return + } + // patch node + node, err := clientSet.CoreV1().Nodes().Patch(ctx, + name, types.JSONPatchType, patchData, metav1.PatchOptions{}) + if err != nil { + log.Errorf("can't patch node %s. error: %v", name, err) + return + } + log.Debugf("updated node %s labels: %+v", name, node.Labels) +} diff --git a/pkg/manager/prom.go b/pkg/manager/prom.go new file mode 100644 index 00000000..9683411a --- /dev/null +++ b/pkg/manager/prom.go @@ -0,0 +1,8 @@ +package manager + +import "github.com/prometheus/client_golang/prometheus" + +// PrometheusCollector defines a service watch event counter. 
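The node-labeling helper above builds a JSON patch whose path escapes the slash in kube-vip.io/has-ip as ~1, following the JSON Pointer escaping rules that patch paths use. A small self-contained sketch of the payload it produces (the address 192.168.0.40 is just an example value):

package main

import (
	"encoding/json"
	"fmt"
)

type patchStringLabel struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

func main() {
	// "/" inside a label key must be escaped as "~1" in a JSON Pointer,
	// hence /metadata/labels/kube-vip.io~1has-ip.
	patch := []patchStringLabel{{
		Op:    "add",
		Path:  "/metadata/labels/kube-vip.io~1has-ip",
		Value: "192.168.0.40",
	}}
	data, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
	// [{"op":"add","path":"/metadata/labels/kube-vip.io~1has-ip","value":"192.168.0.40"}]
}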
+func (sm *Manager) PrometheusCollector() []prometheus.Collector { + return []prometheus.Collector{sm.countServiceWatchEvent, sm.bgpSessionInfoGauge} +} diff --git a/pkg/manager/service_egress.go b/pkg/manager/service_egress.go new file mode 100644 index 00000000..dc2b803c --- /dev/null +++ b/pkg/manager/service_egress.go @@ -0,0 +1,243 @@ +package manager + +import ( + "bufio" + "context" + "fmt" + "os" + "strings" + + "github.com/kube-vip/kube-vip/pkg/iptables" + "github.com/kube-vip/kube-vip/pkg/vip" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DEBUG +const ( + defaultPodCIDR = "10.0.0.0/16" + defaultServiceCIDR = "10.96.0.0/12" +) + +func (sm *Manager) iptablesCheck() error { + file, err := os.Open("/proc/modules") + if err != nil { + return err + } + scanner := bufio.NewScanner(file) + scanner.Split(bufio.ScanLines) + var nat, filter, mangle bool + for scanner.Scan() { + line := strings.Fields(scanner.Text()) + switch line[0] { + case "iptable_filter": + filter = true + case "iptable_nat": + nat = true + case "iptable_mangle": + mangle = true + } + } + + if !filter || !nat || !mangle { + return fmt.Errorf("missing iptables modules -> nat [%t] -> filter [%t] mangle -> [%t]", nat, filter, mangle) + } + return nil +} + +func getSameFamilyCidr(source, ip string) string { + cidrs := strings.Split(source, ",") + for _, cidr := range cidrs { + if vip.IsIPv4(cidr) == vip.IsIPv4(ip) { + return cidr + } + } + return "" +} + +func (sm *Manager) configureEgress(vipIP, podIP, destinationPorts, namespace string) error { + // serviceCIDR, podCIDR, err := sm.AutoDiscoverCIDRs() + // if err != nil { + // serviceCIDR = "10.96.0.0/12" + // podCIDR = "10.0.0.0/16" + // } + + var podCidr, serviceCidr string + + if sm.config.EgressPodCidr != "" { + podCidr = getSameFamilyCidr(sm.config.EgressPodCidr, podIP) + } else { + // There's no default IPv6 pod CIDR, therefore we silently back off if CIDR s not specified. + if !vip.IsIPv4(podIP) { + return nil + } + podCidr = defaultPodCIDR + } + + if sm.config.EgressServiceCidr != "" { + serviceCidr = getSameFamilyCidr(sm.config.EgressServiceCidr, vipIP) + } else { + // There's no default IPv6 service CIDR, therefore we silently back off if CIDR s not specified. 
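getSameFamilyCidr above picks, from a comma-separated dual-stack CIDR list, the entry whose address family matches a given IP. A standard-library sketch of the same idea using net/netip (the example CIDR values are assumptions):

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// sameFamilyCIDR returns the first CIDR from the comma-separated list whose
// address family matches ip, or "" if none does.
func sameFamilyCIDR(cidrs, ip string) string {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return ""
	}
	for _, cidr := range strings.Split(cidrs, ",") {
		prefix, err := netip.ParsePrefix(strings.TrimSpace(cidr))
		if err != nil {
			continue
		}
		if prefix.Addr().Is4() == addr.Is4() {
			return prefix.String()
		}
	}
	return ""
}

func main() {
	dualStack := "10.0.0.0/16,fd00:10::/64"
	fmt.Println(sameFamilyCIDR(dualStack, "10.0.1.5"))   // 10.0.0.0/16
	fmt.Println(sameFamilyCIDR(dualStack, "fd00:10::5")) // fd00:10::/64
}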
+ if !vip.IsIPv4(vipIP) { + return nil + } + serviceCidr = defaultServiceCIDR + } + + protocol := iptables.ProtocolIPv4 + + if vip.IsIPv6(vipIP) { + protocol = iptables.ProtocolIPv6 + } + + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, namespace, protocol) + if err != nil { + return fmt.Errorf("error Creating iptables client [%s]", err) + } + + // Check if the kube-vip mangle chain exists, if not create it + exists, err := i.CheckMangleChain(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error checking for existence of mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + if !exists { + err = i.CreateMangleChain(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error creating mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + } + err = i.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, podCidr) + if err != nil { + return fmt.Errorf("error adding rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + err = i.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, serviceCidr) + if err != nil { + return fmt.Errorf("error adding rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + + mask := "/32" + if !vip.IsIPv4(podIP) { + mask = "/128" + } + + err = i.AppendReturnRulesForMarking(vip.MangleChainName, podIP+mask) + if err != nil { + return fmt.Errorf("error adding marking rules to mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + + err = i.InsertMangeTableIntoPrerouting(vip.MangleChainName) + if err != nil { + return fmt.Errorf("error adding prerouting mangle chain [%s], error [%s]", vip.MangleChainName, err) + } + + if destinationPorts != "" { + + fixedPorts := strings.Split(destinationPorts, ",") + + for _, fixedPort := range fixedPorts { + var proto, port string + + data := strings.Split(fixedPort, ":") + if len(data) == 0 { + continue + } else if len(data) == 1 { + proto = "tcp" + port = data[0] + } else { + proto = data[0] + port = data[1] + } + + err = i.InsertSourceNatForDestinationPort(vipIP, podIP, port, proto) + if err != nil { + return fmt.Errorf("error adding snat rules to nat chain [%s], error [%s]", vip.MangleChainName, err) + } + + } + } else { + err = i.InsertSourceNat(vipIP, podIP) + if err != nil { + return fmt.Errorf("error adding snat rules to nat chain [%s], error [%s]", vip.MangleChainName, err) + } + } + //_ = i.DumpChain(vip.MangleChainName) + err = vip.DeleteExistingSessions(podIP, false) + if err != nil { + return err + } + + return nil +} + +func (sm *Manager) AutoDiscoverCIDRs() (serviceCIDR, podCIDR string, err error) { + pod, err := sm.clientSet.CoreV1().Pods("kube-system").Get(context.TODO(), "kube-controller-manager", v1.GetOptions{}) + if err != nil { + return "", "", err + } + for flags := range pod.Spec.Containers[0].Command { + if strings.Contains(pod.Spec.Containers[0].Command[flags], "--cluster-cidr=") { + podCIDR = strings.ReplaceAll(pod.Spec.Containers[0].Command[flags], "--cluster-cidr=", "") + } + if strings.Contains(pod.Spec.Containers[0].Command[flags], "--service-cluster-ip-range=") { + serviceCIDR = strings.ReplaceAll(pod.Spec.Containers[0].Command[flags], "--service-cluster-ip-range=", "") + } + } + if podCIDR == "" || serviceCIDR == "" { + err = fmt.Errorf("unable to fully determine cluster CIDR configurations") + } + + return +} + +func (sm *Manager) TeardownEgress(podIP, vipIP, destinationPorts, namespace string) error { + protocol := iptables.ProtocolIPv4 + if vip.IsIPv6(podIP) { + protocol = iptables.ProtocolIPv6 
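Both configureEgress and TeardownEgress parse the egress destination-ports annotation as a comma-separated list of proto:port pairs, assuming tcp when no protocol prefix is given. A small sketch of that parsing in isolation (the sample annotation value and the portRule type are illustrative):

package main

import (
	"fmt"
	"strings"
)

type portRule struct {
	Proto string
	Port  string
}

// parseDestinationPorts mirrors the annotation parsing above:
// "tcp:80,udp:53,8080" -> tcp/80, udp/53 and tcp/8080.
func parseDestinationPorts(annotation string) []portRule {
	var rules []portRule
	for _, fixedPort := range strings.Split(annotation, ",") {
		data := strings.Split(fixedPort, ":")
		switch len(data) {
		case 0:
			continue
		case 1:
			rules = append(rules, portRule{Proto: "tcp", Port: data[0]})
		default:
			rules = append(rules, portRule{Proto: data[0], Port: data[1]})
		}
	}
	return rules
}

func main() {
	for _, r := range parseDestinationPorts("tcp:80,udp:53,8080") {
		fmt.Printf("%s/%s\n", r.Proto, r.Port)
	}
}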
+ } + + i, err := vip.CreateIptablesClient(sm.config.EgressWithNftables, namespace, protocol) + if err != nil { + return fmt.Errorf("error Creating iptables client [%s]", err) + } + + // Remove the marking of egress packets + err = i.DeleteMangleMarking(podIP, vip.MangleChainName) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + + // Clear up SNAT rules + if destinationPorts != "" { + fixedPorts := strings.Split(destinationPorts, ",") + + for _, fixedPort := range fixedPorts { + var proto, port string + + data := strings.Split(fixedPort, ":") + if len(data) == 0 { + continue + } else if len(data) == 1 { + proto = "tcp" + port = data[0] + } else { + proto = data[0] + port = data[1] + } + + err = i.DeleteSourceNatForDestinationPort(podIP, vipIP, port, proto) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + + } + } else { + err = i.DeleteSourceNat(podIP, vipIP) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + } + err = vip.DeleteExistingSessions(podIP, false) + if err != nil { + return fmt.Errorf("error changing iptables rules for egress [%s]", err) + } + return nil +} diff --git a/pkg/manager/services.go b/pkg/manager/services.go new file mode 100644 index 00000000..c3e7c4ee --- /dev/null +++ b/pkg/manager/services.go @@ -0,0 +1,423 @@ +package manager + +import ( + "context" + "fmt" + "os" + "slices" + "strings" + "sync" + "time" + + "github.com/google/go-cmp/cmp" + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" + + "github.com/kube-vip/kube-vip/pkg/vip" +) + +const ( + hwAddrKey = "kube-vip.io/hwaddr" + requestedIP = "kube-vip.io/requestedIP" + vipHost = "kube-vip.io/vipHost" + egress = "kube-vip.io/egress" + egressDestinationPorts = "kube-vip.io/egress-destination-ports" + egressSourcePorts = "kube-vip.io/egress-source-ports" + activeEndpoint = "kube-vip.io/active-endpoint" + activeEndpointIPv6 = "kube-vip.io/active-endpoint-ipv6" + flushContrack = "kube-vip.io/flush-conntrack" + loadbalancerIPAnnotation = "kube-vip.io/loadbalancerIPs" + loadbalancerHostname = "kube-vip.io/loadbalancerHostname" +) + +func (sm *Manager) syncServices(_ context.Context, svc *v1.Service, wg *sync.WaitGroup) error { + defer wg.Done() + + log.Debugf("[STARTING] Service Sync") + + // Iterate through the synchronising services + foundInstance := false + newServiceAddresses := fetchServiceAddresses(svc) + newServiceUID := string(svc.UID) + + ingressIPs := []string{} + + for _, ingress := range svc.Status.LoadBalancer.Ingress { + ingressIPs = append(ingressIPs, ingress.IP) + } + + shouldBreake := false + + for x := range sm.serviceInstances { + if shouldBreake { + break + } + for _, newServiceAddress := range newServiceAddresses { + log.Debugf("isDHCP: %t, newServiceAddress: %s", sm.serviceInstances[x].isDHCP, newServiceAddress) + if sm.serviceInstances[x].UID == newServiceUID { + // If the found instance's DHCP configuration doesn't match the new service, delete it. 
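The constants above list the annotations the service watcher reacts to. For reference, a hedged example of a LoadBalancer Service carrying a few of them as a Go object (the name, namespace, addresses and port values are illustrative, not taken from this patch):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleService() *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "demo",
			Namespace: "default",
			Annotations: map[string]string{
				"kube-vip.io/loadbalancerIPs":          "192.168.0.220",
				"kube-vip.io/egress":                   "true",
				"kube-vip.io/egress-destination-ports": "tcp:80,udp:53",
				"kube-vip.io/flush-conntrack":          "true",
			},
		},
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeLoadBalancer,
			Ports: []corev1.ServicePort{
				{Name: "http", Port: 80, Protocol: corev1.ProtocolTCP},
			},
		},
	}
}

func main() {
	svc := exampleService()
	fmt.Println(svc.Annotations["kube-vip.io/loadbalancerIPs"])
}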
+ if (sm.serviceInstances[x].isDHCP && newServiceAddress != "0.0.0.0") || + (!sm.serviceInstances[x].isDHCP && newServiceAddress == "0.0.0.0") || + (!sm.serviceInstances[x].isDHCP && len(svc.Status.LoadBalancer.Ingress) > 0 && !slices.Contains(ingressIPs, newServiceAddress)) || + (len(svc.Status.LoadBalancer.Ingress) > 0 && !comparePortsAndPortStatuses(svc)) || + (sm.serviceInstances[x].isDHCP && len(svc.Status.LoadBalancer.Ingress) > 0 && !slices.Contains(ingressIPs, sm.serviceInstances[x].dhcpInterfaceIP)) { + if err := sm.deleteService(newServiceUID); err != nil { + return err + } + shouldBreake = true + break + } + foundInstance = true + } + } + } + + // This instance wasn't found, we need to add it to the manager + if !foundInstance && len(newServiceAddresses) > 0 { + if err := sm.addService(svc); err != nil { + return err + } + } + + return nil +} + +func comparePortsAndPortStatuses(svc *v1.Service) bool { + portsStatus := svc.Status.LoadBalancer.Ingress[0].Ports + if len(portsStatus) != len(svc.Spec.Ports) { + return false + } + for i, portSpec := range svc.Spec.Ports { + if portsStatus[i].Port != portSpec.Port || portsStatus[i].Protocol != portSpec.Protocol { + return false + } + } + return true +} + +func (sm *Manager) addService(svc *v1.Service) error { + startTime := time.Now() + + newService, err := NewInstance(svc, sm.config) + if err != nil { + return err + } + + log.Infof("(svcs) adding VIP [%s] for [%s/%s]", newService.VIPs, newService.serviceSnapshot.Namespace, newService.serviceSnapshot.Name) + + for x := range newService.vipConfigs { + newService.clusters[x].StartLoadBalancerService(newService.vipConfigs[x], sm.bgpServer) + } + + sm.upnpMap(newService) + + if newService.isDHCP && len(newService.vipConfigs) == 1 { + go func() { + for ip := range newService.dhcpClient.IPChannel() { + log.Debugf("IP %s may have changed", ip) + newService.vipConfigs[0].VIP = ip + newService.dhcpInterfaceIP = ip + if !sm.config.DisableServiceUpdates { + if err := sm.updateStatus(newService); err != nil { + log.Warnf("error updating svc: %s", err) + } + } + } + log.Debugf("IP update channel closed, stopping") + }() + } + + sm.serviceInstances = append(sm.serviceInstances, newService) + + if !sm.config.DisableServiceUpdates { + log.Debugf("(svcs) will update [%s/%s]", newService.serviceSnapshot.Namespace, newService.serviceSnapshot.Name) + if err := sm.updateStatus(newService); err != nil { + // delete service to collect garbage + if deleteErr := sm.deleteService(newService.UID); err != nil { + return deleteErr + } + return err + } + } + + serviceIPs := fetchServiceAddresses(svc) + + // Check if we need to flush any conntrack connections (due to some dangling conntrack connections) + if svc.Annotations[flushContrack] == "true" { + log.Debugf("Flushing conntrack rules for service [%s]", svc.Name) + for _, serviceIP := range serviceIPs { + err = vip.DeleteExistingSessions(serviceIP, false) + if err != nil { + log.Errorf("Error flushing any remaining egress connections [%s]", err) + } + err = vip.DeleteExistingSessions(serviceIP, true) + if err != nil { + log.Errorf("Error flushing any remaining ingress connections [%s]", err) + } + } + } + + // Check if egress is enabled on the service, if so we'll need to configure some rules + if svc.Annotations[egress] == "true" && len(serviceIPs) > 0 { + log.Debugf("Enabling egress for the service [%s]", svc.Name) + if svc.Annotations[activeEndpoint] != "" { + // We will need to modify the iptables rules + err = sm.iptablesCheck() + if err != nil { + 
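+ // a failed check is only logged and not treated as fatal; the per-address
+ // egress configuration below reports its own errors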
log.Errorf("Error configuring egress for loadbalancer [%s]", err) + } + errList := []error{} + for _, serviceIP := range serviceIPs { + podIPs := svc.Annotations[activeEndpoint] + if sm.config.EnableEndpointSlices && vip.IsIPv6(serviceIP) { + podIPs = svc.Annotations[activeEndpointIPv6] + } + err = sm.configureEgress(serviceIP, podIPs, svc.Annotations[egressDestinationPorts], svc.Namespace) + if err != nil { + errList = append(errList, err) + log.Errorf("Error configuring egress for loadbalancer [%s]", err) + } + } + if len(errList) == 0 { + if !sm.config.EnableEndpointSlices { + err = sm.updateServiceEndpointAnnotation(svc.Annotations[activeEndpoint], svc) + if err != nil { + log.Errorf("Error configuring egress annotation for loadbalancer [%s]", err) + } + } else { + err = sm.updateServiceEndpointSlicesAnnotation(svc.Annotations[activeEndpoint], + svc.Annotations[activeEndpointIPv6], svc) + if err != nil { + log.Errorf("Error configuring egress annotation for loadbalancer [%s]", err) + } + } + + } + } + } + + finishTime := time.Since(startTime) + log.Infof("[service] synchronised in %dms", finishTime.Milliseconds()) + + return nil +} + +func (sm *Manager) deleteService(uid string) error { + // protect multiple calls + sm.mutex.Lock() + defer sm.mutex.Unlock() + + var updatedInstances []*Instance + var serviceInstance *Instance + found := false + for x := range sm.serviceInstances { + log.Debugf("Looking for [%s], found [%s]", uid, sm.serviceInstances[x].UID) + // Add the running services to the new array + if sm.serviceInstances[x].UID != uid { + updatedInstances = append(updatedInstances, sm.serviceInstances[x]) + } else { + // Flip the found when we match + found = true + serviceInstance = sm.serviceInstances[x] + } + } + // If we've been through all services and not found the correct one then error + if !found { + // TODO: - fix UX + // return fmt.Errorf("unable to find/stop service [%s]", uid) + return nil + } + shared := false + vipSet := make(map[string]interface{}) + for x := range updatedInstances { + for _, vip := range updatedInstances[x].VIPs { + vipSet[vip] = nil + } + } + for _, vip := range serviceInstance.VIPs { + if _, found := vipSet[vip]; found { + shared = true + } + } + if !shared { + for x := range serviceInstance.clusters { + serviceInstance.clusters[x].Stop() + } + if serviceInstance.isDHCP { + serviceInstance.dhcpClient.Stop() + macvlan, err := netlink.LinkByName(serviceInstance.dhcpInterface) + if err != nil { + return fmt.Errorf("error finding VIP Interface: %v", err) + } + + err = netlink.LinkDel(macvlan) + if err != nil { + return fmt.Errorf("error deleting DHCP Link : %v", err) + } + } + // TODO: Implement dual-stack loadbalancer support if BGP is enabled + if serviceInstance.vipConfigs[0].EnableBGP && ((sm.config.EnableLeaderElection || sm.config.EnableServicesElection) || configuredLocalRoutes[uid]) { + cidrVip := fmt.Sprintf("%s/%s", serviceInstance.vipConfigs[0].VIP, serviceInstance.vipConfigs[0].VIPCIDR) + err := sm.bgpServer.DelHost(cidrVip) + if err != nil { + return fmt.Errorf("error deleting BGP host: %v", err) + } + } + + // We will need to tear down the egress + if serviceInstance.serviceSnapshot.Annotations[egress] == "true" { + if serviceInstance.serviceSnapshot.Annotations[activeEndpoint] != "" { + log.Infof("service [%s] has an egress re-write enabled", serviceInstance.serviceSnapshot.Name) + err := sm.TeardownEgress(serviceInstance.serviceSnapshot.Annotations[activeEndpoint], serviceInstance.serviceSnapshot.Spec.LoadBalancerIP, 
serviceInstance.serviceSnapshot.Annotations[egressDestinationPorts], serviceInstance.serviceSnapshot.Namespace) + if err != nil { + log.Errorf("%v", err) + } + } + } + } + + // Update the service array + sm.serviceInstances = updatedInstances + + log.Infof("Removed [%s] from manager, [%d] advertised services remain", uid, len(sm.serviceInstances)) + + return nil +} + +func (sm *Manager) upnpMap(s *Instance) { + // If upnp is enabled then update the gateway/router with the address + // TODO - work out if we need to mapping.Reclaim() + // TODO - check if this implementation for dualstack is correct + if sm.upnp != nil { + for _, vip := range s.VIPs { + log.Infof("[UPNP] Adding map to [%s:%d - %s]", vip, s.Port, s.serviceSnapshot.Name) + if err := sm.upnp.AddPortMapping(int(s.Port), int(s.Port), 0, vip, strings.ToUpper(s.Type), s.serviceSnapshot.Name); err == nil { + log.Infof("service should be accessible externally on port [%d]", s.Port) + } else { + sm.upnp.Reclaim() + log.Errorf("unable to map port to gateway [%s]", err.Error()) + } + } + } +} + +func (sm *Manager) updateStatus(i *Instance) error { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Deployment before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + currentService, err := sm.clientSet.CoreV1().Services(i.serviceSnapshot.Namespace).Get(context.TODO(), i.serviceSnapshot.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + id, err := os.Hostname() + if err != nil { + return err + } + + currentServiceCopy := currentService.DeepCopy() + if currentServiceCopy.Annotations == nil { + currentServiceCopy.Annotations = make(map[string]string) + } + + // If we're using ARP then we can only broadcast the VIP from one place, add an annotation to the service + if sm.config.EnableARP { + // Add the current host + currentServiceCopy.Annotations[vipHost] = id + } + if i.dhcpInterfaceHwaddr != "" || i.dhcpInterfaceIP != "" { + currentServiceCopy.Annotations[hwAddrKey] = i.dhcpInterfaceHwaddr + currentServiceCopy.Annotations[requestedIP] = i.dhcpInterfaceIP + } + + if !cmp.Equal(currentService, currentServiceCopy) { + currentService, err = sm.clientSet.CoreV1().Services(currentServiceCopy.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) + if err != nil { + log.Errorf("Error updating Service Spec [%s] : %v", i.serviceSnapshot.Name, err) + return err + } + } + + ports := make([]v1.PortStatus, 0, len(i.serviceSnapshot.Spec.Ports)) + for _, port := range i.serviceSnapshot.Spec.Ports { + ports = append(ports, v1.PortStatus{ + Port: port.Port, + Protocol: port.Protocol, + }) + } + + ingresses := []v1.LoadBalancerIngress{} + + for _, c := range i.vipConfigs { + if !vip.IsIP(c.VIP) { + ips, err := vip.LookupHost(c.VIP, sm.config.DNSMode) + if err != nil { + return err + } + for _, ip := range ips { + i := v1.LoadBalancerIngress{ + IP: ip, + Ports: ports, + } + ingresses = append(ingresses, i) + } + } else { + i := v1.LoadBalancerIngress{ + IP: c.VIP, + Ports: ports, + } + ingresses = append(ingresses, i) + } + } + if !cmp.Equal(currentService.Status.LoadBalancer.Ingress, ingresses) { + currentService.Status.LoadBalancer.Ingress = ingresses + _, err = sm.clientSet.CoreV1().Services(currentService.Namespace).UpdateStatus(context.TODO(), currentService, metav1.UpdateOptions{}) + if err != nil { + log.Errorf("Error updating Service %s/%s Status: %v", i.serviceSnapshot.Namespace, i.serviceSnapshot.Name, err) + 
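+ // returning the error hands control back to RetryOnConflict, which retries
+ // the whole get/compare/update cycle on update conflicts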
return err + } + } + return nil + }) + + if retryErr != nil { + log.Errorf("Failed to set Services: %v", retryErr) + return retryErr + } + return nil +} + +// fetchServiceAddresses tries to get the addresses from annotations +// kube-vip.io/loadbalancerIPs, then from spec.loadbalancerIP +func fetchServiceAddresses(s *v1.Service) []string { + annotationAvailable := false + if s.Annotations != nil { + if v, annotationAvailable := s.Annotations[loadbalancerIPAnnotation]; annotationAvailable { + ips := strings.Split(v, ",") + var trimmedIPs []string + for _, ip := range ips { + trimmedIPs = append(trimmedIPs, strings.TrimSpace(ip)) + } + return trimmedIPs + } + } + + if !annotationAvailable { + if len(s.Status.LoadBalancer.Ingress) > 0 { + addresses := []string{} + for _, ingress := range s.Status.LoadBalancer.Ingress { + addresses = append(addresses, ingress.IP) + } + return addresses + } + } + + if s.Spec.LoadBalancerIP != "" { + return []string{s.Spec.LoadBalancerIP} + } + + return []string{} +} diff --git a/pkg/manager/servicesLeader.go b/pkg/manager/servicesLeader.go new file mode 100644 index 00000000..1904a17f --- /dev/null +++ b/pkg/manager/servicesLeader.go @@ -0,0 +1,111 @@ +package manager + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" +) + +// The startServicesWatchForLeaderElection function will start a services watcher, the +func (sm *Manager) startServicesWatchForLeaderElection(ctx context.Context) error { + + err := sm.servicesWatcher(ctx, sm.StartServicesLeaderElection) + if err != nil { + return err + } + + for _, instance := range sm.serviceInstances { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + _ = cluster.Network[i].DeleteRoute() + } + cluster.Stop() + } + } + + log.Infof("Shutting down kube-Vip") + + return nil +} + +// The startServicesWatchForLeaderElection function will start a services watcher, the +func (sm *Manager) StartServicesLeaderElection(ctx context.Context, service *v1.Service, wg *sync.WaitGroup) error { + + id, err := os.Hostname() + if err != nil { + return err + } + + serviceLease := fmt.Sprintf("kubevip-%s", service.Name) + log.Infof("(svc election) service [%s], namespace [%s], lock name [%s], host id [%s]", service.Name, service.Namespace, serviceLease, id) + // we use the Lease lock type since edits to Leases are less common + // and fewer objects in the cluster watch "all Leases". + lock := &resourcelock.LeaseLock{ + LeaseMeta: metav1.ObjectMeta{ + Name: serviceLease, + Namespace: service.Namespace, + }, + Client: sm.clientSet.CoordinationV1(), + LockConfig: resourcelock.ResourceLockConfig{ + Identity: id, + }, + } + + activeService[string(service.UID)] = true + // start the leader election code loop + leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ + Lock: lock, + // IMPORTANT: you MUST ensure that any code you have that + // is protected by the lease must terminate **before** + // you call cancel. Otherwise, you could have a background + // loop still running and another process could + // get elected before your background loop finished, violating + // the stated goal of the lease. 
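+ // Each service gets its own lease (named "kubevip-<service name>" above); the
+ // lease, renew and retry timings all come from the kube-vip configuration.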
+ ReleaseOnCancel: true, + LeaseDuration: time.Duration(sm.config.LeaseDuration) * time.Second, + RenewDeadline: time.Duration(sm.config.RenewDeadline) * time.Second, + RetryPeriod: time.Duration(sm.config.RetryPeriod) * time.Second, + Callbacks: leaderelection.LeaderCallbacks{ + OnStartedLeading: func(ctx context.Context) { + // Mark this service as active (as we've started leading) + // we run this in background as it's blocking + wg.Add(1) + go func() { + if err := sm.syncServices(ctx, service, wg); err != nil { + log.Errorln(err) + } + }() + + }, + OnStoppedLeading: func() { + // we can do cleanup here + log.Infof("(svc election) service [%s] leader lost: [%s]", service.Name, id) + if activeService[string(service.UID)] { + if err := sm.deleteService(string(service.UID)); err != nil { + log.Errorln(err) + } + } + // Mark this service is inactive + activeService[string(service.UID)] = false + }, + OnNewLeader: func(identity string) { + // we're notified when new leader elected + if identity == id { + // I just got the lock + return + } + log.Infof("(svc election) new leader elected: %s", identity) + }, + }, + }) + log.Infof("(svc election) for service [%s] stopping", service.Name) + return nil +} diff --git a/pkg/manager/watch_annotations.go b/pkg/manager/watch_annotations.go new file mode 100644 index 00000000..9488f78c --- /dev/null +++ b/pkg/manager/watch_annotations.go @@ -0,0 +1,253 @@ +package manager + +import ( + "context" + "encoding/base64" + "fmt" + "os" + "regexp" + "strconv" + "strings" + + "github.com/kube-vip/kube-vip/pkg/bgp" + log "github.com/sirupsen/logrus" + + "github.com/davecgh/go-spew/spew" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + watchtools "k8s.io/client-go/tools/watch" + + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +// This file handles the watching of node annotations for configuration, it will exit once the annotations are +// present +func (sm *Manager) annotationsWatcher() error { + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + log.Infof("Kube-Vip is waiting for annotation prefix [%s] to be present on this node", sm.config.Annotations) + hostname, err := os.Hostname() + if err != nil { + return err + } + + labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/hostname": hostname}} + listOptions := metav1.ListOptions{ + LabelSelector: labels.Set(labelSelector.MatchLabels).String(), + } + + // First we'll check the annotations for the node and if + // they aren't what are expected, we'll drop into the watch until they are + nodeList, err := sm.clientSet.CoreV1().Nodes().List(context.Background(), listOptions) + if err != nil { + return err + } + + // We'll assume there's only one node with the hostname annotation. 
If that's not true, + // there's probably bigger problems + node := nodeList.Items[0] + + bgpConfig, bgpPeer, err := parseBgpAnnotations(sm.config.BGPConfig, &node, sm.config.Annotations) + if err == nil { + // No error, the annotations already exist + sm.config.BGPConfig = bgpConfig + sm.config.BGPPeerConfig = bgpPeer + return nil + } + + // We got an error with the annotations, falling back to the watch until + // they're as needed + log.Warn(err) + + rw, err := watchtools.NewRetryWatcher(node.ResourceVersion, &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.CoreV1().Nodes().Watch(context.Background(), listOptions) + }, + }) + if err != nil { + return fmt.Errorf("error creating annotations watcher: %s", err.Error()) + } + + exitFunction := make(chan struct{}) + go func() { + select { + case <-sm.shutdownChan: + log.Debug("[annotations] shutdown called") + // Stop the retry watcher + rw.Stop() + return + case <-exitFunction: + log.Debug("[annotations] function ending") + // Stop the retry watcher + rw.Stop() + return + } + }() + + ch := rw.ResultChan() + + for event := range ch { + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + node, ok := event.Object.(*v1.Node) + if !ok { + return fmt.Errorf("unable to parse Kubernetes Node from Annotation watcher") + } + + bgpConfig, bgpPeer, err := parseBgpAnnotations(sm.config.BGPConfig, node, sm.config.Annotations) + if err != nil { + log.Error(err) + continue + } + + sm.config.BGPConfig = bgpConfig + sm.config.BGPPeerConfig = bgpPeer + + rw.Stop() + case watch.Deleted: + node, ok := event.Object.(*v1.Node) + if !ok { + return fmt.Errorf("unable to parse Kubernetes Node from Kubernetes watcher") + } + + log.Infof("Node [%s] has been deleted", node.Name) + + case watch.Bookmark: + // Un-used + case watch.Error: + log.Error("Error attempting to watch Kubernetes Nodes") + + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + + } + + status := statusErr.ErrStatus + log.Errorf("%v", status) + default: + } + } + close(exitFunction) + log.Infoln("Exiting Annotations watcher") + return nil + +} + +// parseNodeAnnotations parses the annotations on the node and updates the configuration +// returning an error if the annotations are not valid or missing; and nil if everything is OK +// to continue +// +// Parsed annotation config overlays config in passed bgpConfig in order to preserve configs +// set by other means with the exception that bgpConfig.Peers is overwritten. 
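+//
+// For example, with the annotation prefix "bgp", a node annotated with
+//
+//	bgp/node-asn: "65000"
+//	bgp/peer-asn: "64000"
+//	bgp/src-ip:   "10.0.0.254"
+//	bgp/peer-ip:  "10.0.0.1,10.0.0.2"
+//
+// yields a local AS of 65000, a router ID/source IP of 10.0.0.254, and one peer
+// per comma-separated address, each with AS 64000.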
+// +// The regex expression for each annotation ensures (at least in terms of annotations) backwards +// compatibility with the Equinix Metal annotation format changed in +// https://github.com/equinix/cloud-provider-equinix-metal/releases/tag/v3.3.0 +// +// "metal.equinix.com/`" --> "metal.equinix.com/bgp-peers-{{n}}-`" +// * `` is the relevant information, such as `node-asn` or `peer-ip` +// * `{{n}}` is the number of the peer, always starting with `0` +// * kube-vip is only designed to manage one peer, just look for {{n}} == 0 +func parseBgpAnnotations(bgpConfig bgp.Config, node *v1.Node, prefix string) (bgp.Config, bgp.Peer, error) { + bgpPeer := bgp.Peer{} + + nodeASN := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?node-asn", prefix)) + if regex.Match([]byte(k)) { + nodeASN = v + } + } + if nodeASN == "" { + return bgpConfig, bgpPeer, fmt.Errorf("node-asn value missing or empty") + } + + u64, err := strconv.ParseUint(nodeASN, 10, 32) + if err != nil { + return bgpConfig, bgpPeer, err + } + + bgpConfig.AS = uint32(u64) + + srcIP := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?src-ip", prefix)) + if regex.Match([]byte(k)) { + srcIP = v + } + } + if srcIP == "" { + return bgpConfig, bgpPeer, fmt.Errorf("src-ip value missing or empty") + } + + // Set the routerID (Unique ID for BGP) to the source IP + // Also set the BGP peering to the sourceIP + bgpConfig.RouterID, bgpConfig.SourceIP = srcIP, srcIP + + peerASN := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?peer-asn", prefix)) + if regex.Match([]byte(k)) { + peerASN = v + } + } + if peerASN == "" { + return bgpConfig, bgpPeer, fmt.Errorf("peer-asn value missing or empty") + } + + u64, err = strconv.ParseUint(peerASN, 10, 32) + if err != nil { + return bgpConfig, bgpPeer, err + } + + bgpPeer.AS = uint32(u64) + + peerIPString := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-[0-9]+-)?peer-ip", prefix)) + if regex.Match([]byte(k)) { + peerIPString += v + "," + } + } + peerIPString = strings.TrimRight(peerIPString, ",") + + peerIPs := strings.Split(peerIPString, ",") + + bgpConfig.Peers = make([]bgp.Peer, 0, len(peerIPs)) + for _, peerIP := range peerIPs { + ipAddr := strings.TrimSpace(peerIP) + + if ipAddr != "" { + bgpPeer.Address = ipAddr + // Check if we're also expecting a password for this peer + base64BGPPassword := "" + for k, v := range node.Annotations { + regex := regexp.MustCompile(fmt.Sprintf("^%s/(bgp-peers-0-)?bgp-pass", prefix)) + if regex.Match([]byte(k)) { + base64BGPPassword = v + } + } + if base64BGPPassword != "" { + // Decode base64 encoded string + decodedPassword, err := base64.StdEncoding.DecodeString(base64BGPPassword) + if err != nil { + return bgpConfig, bgpPeer, err + } + // Set the password for each peer + bgpPeer.Password = string(decodedPassword) + } + bgpConfig.Peers = append(bgpConfig.Peers, bgpPeer) + } + } + + //log.Debugf("BGPConfig: %v\n", bgpConfig) + //log.Debugf("BGPPeerConfig: %v\n", bgpPeer) + + return bgpConfig, bgpPeer, nil +} diff --git a/pkg/manager/watch_endpoints.go b/pkg/manager/watch_endpoints.go new file mode 100644 index 00000000..6091de6d --- /dev/null +++ b/pkg/manager/watch_endpoints.go @@ -0,0 +1,409 @@ +package manager + +import ( + "context" + "fmt" + "strings" + "sync" + + "github.com/kube-vip/kube-vip/pkg/kubevip" + log "github.com/sirupsen/logrus" + v1 
"k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" +) + +func (sm *Manager) watchEndpoint(ctx context.Context, id string, service *v1.Service, wg *sync.WaitGroup) error { + log.Infof("[endpoint] watching for service [%s] in namespace [%s]", service.Name, service.Namespace) + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + leaderContext, cancel := context.WithCancel(context.Background()) + var leaderElectionActive bool + defer cancel() + + opts := metav1.ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", service.Name).String(), + } + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.CoreV1().Endpoints(service.Namespace).Watch(ctx, opts) + }, + }) + if err != nil { + cancel() + return fmt.Errorf("error creating endpoint watcher: %s", err.Error()) + } + + exitFunction := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + log.Debug("[endpoint] context cancelled") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + case <-sm.shutdownChan: + log.Debug("[endpoint] shutdown called") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + case <-exitFunction: + log.Debug("[endpoint] function ending") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + } + }() + + ch := rw.ResultChan() + + var lastKnownGoodEndpoint string + for event := range ch { + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + + case watch.Added, watch.Modified: + ep, ok := event.Object.(*v1.Endpoints) + if !ok { + cancel() + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + // Build endpoints + var endpoints []string + if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && !sm.config.EnableLeaderElection && !sm.config.EnableServicesElection && + service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + endpoints = getAllEndpoints(ep) + } else { + endpoints = getLocalEndpoints(ep, id, sm.config) + } + + // Find out if we have any local endpoints + // if out endpoint is empty then populate it + // if not, go through the endpoints and see if ours still exists + // If we have a local endpoint then begin the leader Election, unless it's already running + // + + // Check that we have local endpoints + if len(endpoints) != 0 { + // if we haven't populated one, then do so + if lastKnownGoodEndpoint != "" { + + // check out previous endpoint exists + stillExists := false + + for x := range endpoints { + if endpoints[x] == lastKnownGoodEndpoint { + stillExists = true + } + } + // If the last endpoint no longer exists, we cancel our leader Election + if !stillExists && leaderElectionActive { + log.Warnf("[endpoint] existing [%s] has been removed, restarting leaderElection", lastKnownGoodEndpoint) + // Stop the existing leaderElection + cancel() + // Set our active endpoint to an existing one + lastKnownGoodEndpoint = endpoints[0] + // disable last leaderElection flag + 
leaderElectionActive = false + } + + } else { + lastKnownGoodEndpoint = endpoints[0] + } + + // Set the service accordingly + if service.Annotations[egress] == "true" { + service.Annotations[activeEndpoint] = lastKnownGoodEndpoint + } + + if !leaderElectionActive && sm.config.EnableServicesElection { + go func() { + leaderContext, cancel = context.WithCancel(context.Background()) + + // This is a blocking function, that will restart (in the event of failure) + for { + // if the context isn't cancelled restart + if leaderContext.Err() != context.Canceled { + leaderElectionActive = true + err := sm.StartServicesLeaderElection(leaderContext, service, wg) + if err != nil { + log.Error(err) + } + leaderElectionActive = false + } else { + leaderElectionActive = false + break + } + } + }() + } + + // There are local endpoints available on the node + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection && !configuredLocalRoutes[string(service.UID)] { + // If routing table mode is enabled - routes should be added per node + if sm.config.EnableRoutingTable { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + err := cluster.Network[i].AddRoute() + if err != nil { + log.Errorf("[endpoint] error adding route: %s\n", err.Error()) + } else { + log.Infof("[endpoint] added route: %s, service: %s/%s, interface: %s, table: %d", + cluster.Network[i].IP(), service.Namespace, service.Name, cluster.Network[i].Interface(), sm.config.RoutingTableID) + configuredLocalRoutes[string(service.UID)] = true + leaderElectionActive = true + } + } + } + } + } + + // If BGP mode is enabled - hosts should be added per node + if sm.config.EnableBGP { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + address := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), sm.config.VIPCIDR) + log.Debugf("[endpoint] Attempting to advertise BGP service: %s", address) + err := sm.bgpServer.AddHost(address) + if err != nil { + log.Errorf("[endpoint] error adding BGP host %s\n", err.Error()) + } else { + log.Infof("[endpoint] added BGP host: %s, service: %s/%s", address, service.Namespace, service.Name) + configuredLocalRoutes[string(service.UID)] = true + leaderElectionActive = true + } + } + } + } + } + } + } else { + // There are no local enpoints + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection && configuredLocalRoutes[string(service.UID)] { + // If routing table mode is enabled - routes should be deleted + if sm.config.EnableRoutingTable { + sm.clearRoutes(service) + configuredLocalRoutes[string(service.UID)] = false + } + + // If BGP mode is enabled - routes should be deleted + if sm.config.EnableBGP { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + address := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), sm.config.VIPCIDR) + err := sm.bgpServer.DelHost(address) + if err != nil { + log.Errorf("[endpoint] error deleting BGP host%s: %s\n", address, err.Error()) + } else { + log.Infof("[endpoint] deleted BGP host: %s, service: %s/%s", + address, service.Namespace, service.Name) + configuredLocalRoutes[string(service.UID)] = false + leaderElectionActive = false + } + } + } + } + } + } + + // If there are no local endpoints, and we had one then remove it and stop the leaderElection + if 
lastKnownGoodEndpoint != "" { + log.Warnf("[endpoint] existing [%s] has been removed, no remaining endpoints for leaderElection", lastKnownGoodEndpoint) + lastKnownGoodEndpoint = "" // reset endpoint + cancel() // stop services watcher + leaderElectionActive = false + } + } + log.Debugf("[endpoint watcher] service %s/%s: local endpoint(s) [%d], known good [%s], active election [%t]", + service.Namespace, service.Name, len(endpoints), lastKnownGoodEndpoint, leaderElectionActive) + + case watch.Deleted: + // When no-leader-elecition mode + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection { + // find all existing local endpoints + ep, ok := event.Object.(*v1.Endpoints) + if !ok { + cancel() + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + var endpoints []string + if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && !sm.config.EnableLeaderElection && !sm.config.EnableServicesElection && + service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + endpoints = getAllEndpoints(ep) + } else { + endpoints = getLocalEndpoints(ep, id, sm.config) + } + + // If there were local endpoints deleted + if len(endpoints) > 0 { + // Delete all routes in routing table mode + if sm.config.EnableRoutingTable { + sm.clearRoutes(service) + } + + // Delete all hosts in BGP mode + if sm.config.EnableBGP { + sm.clearBGPHosts(service) + } + } + } + + // Close the goroutine that will end the retry watcher, then exit the endpoint watcher function + close(exitFunction) + log.Infof("[endpoints] deleted stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + + return nil + case watch.Error: + errObject := apierrors.FromObject(event.Object) + statusErr, _ := errObject.(*apierrors.StatusError) + log.Errorf("[endpoint] -> %v", statusErr) + } + } + close(exitFunction) + log.Infof("[endpoints] stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + return nil //nolint:govet +} + +func getAllEndpoints(ep *v1.Endpoints) []string { + endpoints := []string{} + for subset := range ep.Subsets { + for address := range ep.Subsets[subset].Addresses { + addr := strings.Split(ep.Subsets[subset].Addresses[address].IP, "/") + endpoints = append(endpoints, addr[0]) + } + } + return endpoints +} + +func getLocalEndpoints(ep *v1.Endpoints, id string, config *kubevip.Config) []string { + var localendpoints []string + + shortname, shortnameErr := getShortname(id) + if shortnameErr != nil { + if config.EnableRoutingTable && (!config.EnableLeaderElection && !config.EnableServicesElection) { + log.Debugf("[endpoint] %v, shortname will not be used", shortnameErr) + } else { + log.Errorf("[endpoint] %v", shortnameErr) + } + } + + for subset := range ep.Subsets { + for address := range ep.Subsets[subset].Addresses { + // 1. Compare the hostname on the endpoint to the hostname + // 2. Compare the nodename on the endpoint to the hostname + // 3. Drop the FQDN to a shortname and compare to the nodename on the endpoint + + // 1. Compare the Hostname first (should be FQDN) + log.Debugf("[endpoint] processing endpoint [%s]", ep.Subsets[subset].Addresses[address].IP) + if id == ep.Subsets[subset].Addresses[address].Hostname { + log.Debugf("[endpoint] found local endpoint - address: %s, hostname: %s", ep.Subsets[subset].Addresses[address].IP, ep.Subsets[subset].Addresses[address].Hostname) + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } else { + // 2. 
Compare the Nodename (from testing could be FQDN or short) + if ep.Subsets[subset].Addresses[address].NodeName != nil { + if id == *ep.Subsets[subset].Addresses[address].NodeName { + log.Debugf("[endpoint] found local endpoint - address: %s, hostname: %s, node: %s", ep.Subsets[subset].Addresses[address].IP, ep.Subsets[subset].Addresses[address].Hostname, *ep.Subsets[subset].Addresses[address].NodeName) + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } else if shortnameErr == nil && shortname == *ep.Subsets[subset].Addresses[address].NodeName { + log.Debugf("[endpoint] found local endpoint - address: %s, shortname: %s, node: %s", ep.Subsets[subset].Addresses[address].IP, shortname, *ep.Subsets[subset].Addresses[address].NodeName) + localendpoints = append(localendpoints, ep.Subsets[subset].Addresses[address].IP) + } + + } + } + } + } + return localendpoints +} + +func (sm *Manager) clearRoutes(service *v1.Service) { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + err := cluster.Network[i].DeleteRoute() + if err != nil && !strings.Contains(err.Error(), "no such process") { + log.Errorf("failed to delete route for %s: %s", cluster.Network[i].IP(), err.Error()) + } else { + log.Infof("deleted route: %s, service: %s/%s, interface: %s, table: %d", + cluster.Network[i].IP(), service.Namespace, service.Name, cluster.Network[i].Interface(), sm.config.RoutingTableID) + } + } + } + } +} + +func (sm *Manager) clearBGPHosts(service *v1.Service) { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + address := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), sm.config.VIPCIDR) + err := sm.bgpServer.DelHost(address) + if err != nil { + log.Errorf("[endpoint] error deleting BGP host %s\n", err.Error()) + } else { + log.Infof("[endpoint] deleted BGP host: %s, service: %s/%s", + address, service.Namespace, service.Name) + } + } + } + } +} + +func (sm *Manager) updateServiceEndpointAnnotation(endpoint string, service *v1.Service) error { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Deployment before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + currentService, err := sm.clientSet.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + currentServiceCopy := currentService.DeepCopy() + if currentServiceCopy.Annotations == nil { + currentServiceCopy.Annotations = make(map[string]string) + } + + currentServiceCopy.Annotations[activeEndpoint] = endpoint + + _, err = sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) + if err != nil { + log.Errorf("Error updating Service Spec [%s] : %v", currentServiceCopy.Name, err) + return err + } + return nil + }) + + if retryErr != nil { + log.Errorf("Failed to set Services: %v", retryErr) + return retryErr + } + return nil +} + +// returns just the shortname (or first bit) of a FQDN +func getShortname(hostname string) (string, error) { + if len(hostname) == 0 { + return "", fmt.Errorf("unable to find shortname from %s", hostname) + } + hostParts := strings.Split(hostname, ".") + if len(hostParts) >= 1 { + return hostParts[0], nil + } + return "", fmt.Errorf("unable to find shortname 
from %s", hostname) +} diff --git a/pkg/manager/watch_endpointslices.go b/pkg/manager/watch_endpointslices.go new file mode 100644 index 00000000..04a00cc9 --- /dev/null +++ b/pkg/manager/watch_endpointslices.go @@ -0,0 +1,375 @@ +package manager + +import ( + "context" + "fmt" + "sync" + + "github.com/kube-vip/kube-vip/pkg/kubevip" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" + "k8s.io/client-go/util/retry" +) + +func (sm *Manager) watchEndpointSlices(ctx context.Context, id string, service *v1.Service, wg *sync.WaitGroup) error { + log.Infof("[endpointslices] watching for service [%s] in namespace [%s]", service.Name, service.Namespace) + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + leaderContext, cancel := context.WithCancel(context.Background()) + var leaderElectionActive bool + defer cancel() + + labelSelector := metav1.LabelSelector{MatchLabels: map[string]string{"kubernetes.io/service-name": service.Name}} + + opts := metav1.ListOptions{ + LabelSelector: labels.Set(labelSelector.MatchLabels).String(), + } + + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.DiscoveryV1().EndpointSlices(service.Namespace).Watch(ctx, opts) + }, + }) + if err != nil { + cancel() + return fmt.Errorf("[endpointslices] error creating endpointslices watcher: %s", err.Error()) + } + + exitFunction := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + log.Debug("[endpointslices] context cancelled") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + case <-sm.shutdownChan: + log.Debug("[endpointslices] shutdown called") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + case <-exitFunction: + log.Debug("[endpointslices] function ending") + // Stop the retry watcher + rw.Stop() + // Cancel the context, which will in turn cancel the leadership + cancel() + return + } + }() + + ch := rw.ResultChan() + + for event := range ch { + lastKnownGoodEndpoint := "" + activeEndpointAnnotation := activeEndpoint + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + + eps, ok := event.Object.(*discoveryv1.EndpointSlice) + if !ok { + cancel() + return fmt.Errorf("[endpointslices] unable to parse Kubernetes services from API watcher") + } + + if eps.AddressType == discoveryv1.AddressTypeIPv6 { + activeEndpointAnnotation = activeEndpointIPv6 + } + + // Build endpoints + var endpoints []string + if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && !sm.config.EnableLeaderElection && !sm.config.EnableServicesElection && + service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + endpoints = getAllEndpointsFromEndpointslices(eps) + } else { + endpoints = getLocalEndpointsFromEndpointslices(eps, id, sm.config) + } + + // Find out if we have any local endpoints + // if out endpoint is empty then populate it + // if not, go through the endpoints and see if ours still exists + // If we have a local endpoint then begin 
the leader Election, unless it's already running + // + + // Check that we have local endpoints + if len(endpoints) != 0 { + // if we haven't populated one, then do so + if lastKnownGoodEndpoint != "" { + + // check out previous endpoint exists + stillExists := false + + for x := range endpoints { + if endpoints[x] == lastKnownGoodEndpoint { + stillExists = true + } + } + // If the last endpoint no longer exists, we cancel our leader Election + if !stillExists && leaderElectionActive { + log.Warnf("[endpointslices] existing endpoint [%s] has been removed, restarting leaderElection", lastKnownGoodEndpoint) + // Stop the existing leaderElection + cancel() + // Set our active endpoint to an existing one + lastKnownGoodEndpoint = endpoints[0] + // disable last leaderElection flag + leaderElectionActive = false + } + + } else { + lastKnownGoodEndpoint = endpoints[0] + } + + // Set the service accordingly + if service.Annotations[egress] == "true" { + service.Annotations[activeEndpointAnnotation] = lastKnownGoodEndpoint + } + + if !leaderElectionActive && sm.config.EnableServicesElection { + go func() { + leaderContext, cancel = context.WithCancel(context.Background()) + + // This is a blocking function, that will restart (in the event of failure) + for { + // if the context isn't cancelled restart + if leaderContext.Err() != context.Canceled { + leaderElectionActive = true + err := sm.StartServicesLeaderElection(leaderContext, service, wg) + if err != nil { + log.Error(err) + } + leaderElectionActive = false + } else { + leaderElectionActive = false + break + } + } + }() + } + + // There are local endpoints available on the node + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection && !configuredLocalRoutes[string(service.UID)] { + // If routing table mode is enabled - routes should be added per node + if sm.config.EnableRoutingTable { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + err := cluster.Network[i].AddRoute() + if err != nil { + log.Errorf("[endpointslices] error adding route: %s\n", err.Error()) + } else { + log.Infof("[endpointslices] added route: %s, service: %s/%s, interface: %s, table: %d", + cluster.Network[i].IP(), service.Namespace, service.Name, cluster.Network[i].Interface(), sm.config.RoutingTableID) + configuredLocalRoutes[string(service.UID)] = true + leaderElectionActive = true + } + } + } + } + } + + // If BGP mode is enabled - hosts should be added per node + if sm.config.EnableBGP { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + address := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), sm.config.VIPCIDR) + log.Debugf("[endpointslices] Attempting to advertise BGP service: %s", address) + err := sm.bgpServer.AddHost(address) + if err != nil { + log.Errorf("[endpointslices] error adding BGP host %s\n", err.Error()) + } else { + log.Infof("[endpointslices] added BGP host: %s, service: %s/%s", address, service.Namespace, service.Name) + configuredLocalRoutes[string(service.UID)] = true + leaderElectionActive = true + } + } + } + } + } + } + } else { + // There are no local enpoints + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection && configuredLocalRoutes[string(service.UID)] { + // If routing table mode is enabled - routes should be deleted + if sm.config.EnableRoutingTable { + sm.clearRoutes(service) + 
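+ // clearing the flag allows the route to be re-installed as soon as a local
+ // endpoint for this service reappears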
configuredLocalRoutes[string(service.UID)] = false + } + + // If BGP mode is enabled - routes should be deleted + if sm.config.EnableBGP { + if instance := sm.findServiceInstance(service); instance != nil { + for _, cluster := range instance.clusters { + for i := range cluster.Network { + address := fmt.Sprintf("%s/%s", cluster.Network[i].IP(), sm.config.VIPCIDR) + err := sm.bgpServer.DelHost(address) + if err != nil { + log.Errorf("[endpointslices] error deleting BGP host%s: %s\n", address, err.Error()) + } else { + log.Infof("[endpointslices] deleted BGP host: %s, service: %s/%s", + address, service.Namespace, service.Name) + configuredLocalRoutes[string(service.UID)] = false + leaderElectionActive = false + } + } + } + } + } + } + + // If there are no local endpoints, and we had one then remove it and stop the leaderElection + if lastKnownGoodEndpoint != "" { + log.Warnf("[endpointslices] existing endpoint [%s] has been removed, no remaining endpoints for leaderElection", lastKnownGoodEndpoint) + lastKnownGoodEndpoint = "" // reset endpoint + cancel() // stop services watcher + leaderElectionActive = false + } + } + log.Debugf("[endpointslices watcher] service %s/%s: local endpoint(s) [%d], known good [%s], active election [%t]", + service.Namespace, service.Name, len(endpoints), lastKnownGoodEndpoint, leaderElectionActive) + + case watch.Deleted: + // When no-leader-elecition mode + if !sm.config.EnableServicesElection && !sm.config.EnableLeaderElection { + // find all existing local endpoints + eps, ok := event.Object.(*discoveryv1.EndpointSlice) + if !ok { + cancel() + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + var endpoints []string + if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && !sm.config.EnableLeaderElection && !sm.config.EnableServicesElection && + service.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + endpoints = getAllEndpointsFromEndpointslices(eps) + } else { + endpoints = getLocalEndpointsFromEndpointslices(eps, id, sm.config) + } + + // If there were local endpints deleted + if len(endpoints) > 0 { + // Delete all routes in routing table mode + if sm.config.EnableRoutingTable { + sm.clearRoutes(service) + } + + // Delete all hosts in BGP mode + if sm.config.EnableBGP { + sm.clearBGPHosts(service) + } + } + } + + // Close the goroutine that will end the retry watcher, then exit the endpoint watcher function + close(exitFunction) + log.Infof("[endpointslices] deleted stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + return nil + case watch.Error: + errObject := apierrors.FromObject(event.Object) + statusErr, _ := errObject.(*apierrors.StatusError) + log.Errorf("[endpointslices] -> %v", statusErr) + } + } + close(exitFunction) + log.Infof("[endpointslices] stopping watching for [%s] in namespace [%s]", service.Name, service.Namespace) + return nil //nolint:govet +} + +func (sm *Manager) updateServiceEndpointSlicesAnnotation(endpoint, endpointIPv6 string, service *v1.Service) error { + retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { + // Retrieve the latest version of Deployment before attempting update + // RetryOnConflict uses exponential backoff to avoid exhausting the apiserver + currentService, err := sm.clientSet.CoreV1().Services(service.Namespace).Get(context.TODO(), service.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + currentServiceCopy := currentService.DeepCopy() + if currentServiceCopy.Annotations == nil { + 
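+ // a service created without annotations has a nil map, so initialise it before
+ // writing the active-endpoint annotations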
currentServiceCopy.Annotations = make(map[string]string) + } + + currentServiceCopy.Annotations[activeEndpoint] = endpoint + currentServiceCopy.Annotations[activeEndpointIPv6] = endpointIPv6 + + _, err = sm.clientSet.CoreV1().Services(currentService.Namespace).Update(context.TODO(), currentServiceCopy, metav1.UpdateOptions{}) + if err != nil { + log.Errorf("Error updating Service Spec [%s] : %v", currentServiceCopy.Name, err) + return err + } + return nil + }) + + if retryErr != nil { + log.Errorf("Failed to set Services: %v", retryErr) + return retryErr + } + return nil +} + +func getLocalEndpointsFromEndpointslices(eps *discoveryv1.EndpointSlice, id string, config *kubevip.Config) []string { + var localendpoints []string + + shortname, shortnameErr := getShortname(id) + if shortnameErr != nil { + if config.EnableRoutingTable && (!config.EnableLeaderElection && !config.EnableServicesElection) { + log.Debugf("[endpointslices] %v, shortname will not be used", shortnameErr) + } else { + log.Errorf("[endpointslices] %v", shortnameErr) + } + } + + for i := range eps.Endpoints { + for j := range eps.Endpoints[i].Addresses { + // 1. Compare the hostname on the endpoint to the hostname + // 2. Compare the nodename on the endpoint to the hostname + // 3. Drop the FQDN to a shortname and compare to the nodename on the endpoint + + // 1. Compare the Hostname first (should be FQDN) + log.Debugf("[endpointslices] processing endpoint [%s]", eps.Endpoints[i].Addresses[j]) + if eps.Endpoints[i].Hostname != nil && id == *eps.Endpoints[i].Hostname { + if *eps.Endpoints[i].Conditions.Serving { + log.Debugf("[endpointslices] found endpoint - address: %s, hostname: %s", eps.Endpoints[i].Addresses[j], *eps.Endpoints[i].Hostname) + localendpoints = append(localendpoints, eps.Endpoints[i].Addresses[j]) + } + } else { + // 2. Compare the Nodename (from testing could be FQDN or short) + if eps.Endpoints[i].NodeName != nil { + if id == *eps.Endpoints[i].NodeName && *eps.Endpoints[i].Conditions.Serving { + if eps.Endpoints[i].Hostname != nil { + log.Debugf("[endpointslices] found endpoint - address: %s, hostname: %s, node: %s", eps.Endpoints[i].Addresses[j], *eps.Endpoints[i].Hostname, *eps.Endpoints[i].NodeName) + } else { + log.Debugf("[endpointslices] found endpoint - address: %s, node: %s", eps.Endpoints[i].Addresses[j], *eps.Endpoints[i].NodeName) + } + localendpoints = append(localendpoints, eps.Endpoints[i].Addresses[j]) + // 3. Compare to shortname + } else if shortnameErr != nil && shortname == *eps.Endpoints[i].NodeName && *eps.Endpoints[i].Conditions.Serving { + log.Debugf("[endpointslices] found endpoint - address: %s, shortname: %s, node: %s", eps.Endpoints[i].Addresses[j], shortname, *eps.Endpoints[i].NodeName) + localendpoints = append(localendpoints, eps.Endpoints[i].Addresses[j]) + + } + } + } + } + } + return localendpoints +} + +func getAllEndpointsFromEndpointslices(eps *discoveryv1.EndpointSlice) []string { + endpoints := []string{} + for _, ep := range eps.Endpoints { + endpoints = append(endpoints, ep.Addresses...) 
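+ // no hostname/nodename filtering here: every address in the slice is returned,
+ // as expected by the Cluster traffic-policy code paths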
+ } + return endpoints +} diff --git a/pkg/manager/watch_services.go b/pkg/manager/watch_services.go new file mode 100644 index 00000000..5a49b135 --- /dev/null +++ b/pkg/manager/watch_services.go @@ -0,0 +1,323 @@ +package manager + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/davecgh/go-spew/spew" + "github.com/kube-vip/kube-vip/pkg/vip" + "github.com/prometheus/client_golang/prometheus" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + watchtools "k8s.io/client-go/tools/watch" +) + +// TODO: Fix the naming of these contexts + +// activeServiceLoadBalancer keeps track of services that already have a leaderElection in place +var activeServiceLoadBalancer map[string]context.Context + +// activeServiceLoadBalancer keeps track of services that already have a leaderElection in place +var activeServiceLoadBalancerCancel map[string]func() + +// activeService keeps track of services that already have a leaderElection in place +var activeService map[string]bool + +// watchedService keeps track of services that are already being watched +var watchedService map[string]bool + +// watchedService keeps track of routes that has been configured on the node +var configuredLocalRoutes map[string]bool + +func init() { + // Set up the caches for monitoring existing active or watched services + activeServiceLoadBalancerCancel = make(map[string]func()) + activeServiceLoadBalancer = make(map[string]context.Context) + activeService = make(map[string]bool) + watchedService = make(map[string]bool) + configuredLocalRoutes = make(map[string]bool) +} + +// This function handles the watching of a services endpoints and updates a load balancers endpoint configurations accordingly +func (sm *Manager) servicesWatcher(ctx context.Context, serviceFunc func(context.Context, *v1.Service, *sync.WaitGroup) error) error { + // Watch function + var wg sync.WaitGroup + + id, err := os.Hostname() + if err != nil { + return err + } + if sm.config.ServiceNamespace == "" { + // v1.NamespaceAll is actually "", but we'll stay with the const in case things change upstream + sm.config.ServiceNamespace = v1.NamespaceAll + log.Infof("(svcs) starting services watcher for all namespaces") + } else { + log.Infof("(svcs) starting services watcher for services in namespace [%s]", sm.config.ServiceNamespace) + } + + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return sm.clientSet.CoreV1().Services(sm.config.ServiceNamespace).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + return fmt.Errorf("error creating services watcher: %s", err.Error()) + } + exitFunction := make(chan struct{}) + go func() { + select { + case <-sm.shutdownChan: + log.Debug("(svcs) shutdown called") + // Stop the retry watcher + rw.Stop() + return + case <-exitFunction: + log.Debug("(svcs) function ending") + // Stop the retry watcher + rw.Stop() + return + } + }() + ch := rw.ResultChan() + + // Used for tracking an active endpoint / pod + for event := range ch { + sm.countServiceWatchEvent.With(prometheus.Labels{"type": string(event.Type)}).Add(1) + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + // 
log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + + // We only care about LoadBalancer services + if svc.Spec.Type != v1.ServiceTypeLoadBalancer { + break + } + + svcAddresses := fetchServiceAddresses(svc) + + // We only care about LoadBalancer services that have been allocated an address + if len(svcAddresses) <= 0 { + break + } + + // Check the loadBalancer class + if svc.Spec.LoadBalancerClass != nil { + // if this isn't nil then it has been configured, check if it the kube-vip loadBalancer class + if *svc.Spec.LoadBalancerClass != sm.config.LoadBalancerClassName { + log.Infof("(svcs) [%s] specified the loadBalancer class [%s], ignoring", svc.Name, *svc.Spec.LoadBalancerClass) + break + } + } else if sm.config.LoadBalancerClassOnly { + // if kube-vip is configured to only recognize services with kube-vip's lb class, then ignore the services without any lb class + log.Infof("(svcs) kube-vip configured to only recognize services with kube-vip's lb class but the service [%s] didn't specify any loadBalancer class, ignoring", svc.Name) + break + } + + // Check if we ignore this service + if svc.Annotations["kube-vip.io/ignore"] == "true" { + log.Infof("(svcs) [%s] has an ignore annotation for kube-vip", svc.Name) + break + } + + // The modified event should only be triggered if the service has been modified (i.e. moved somewhere else) + if event.Type == watch.Modified { + for _, addr := range svcAddresses { + //log.Debugf("(svcs) Retreiving local addresses, to ensure that this modified address doesn't exist: %s", addr) + f, err := vip.GarbageCollect(sm.config.Interface, addr) + if err != nil { + log.Errorf("(svcs) cleaning existing address error: [%s]", err.Error()) + } + if f { + log.Warnf("(svcs) already found existing address [%s] on adapter [%s]", addr, sm.config.Interface) + } + } + } + // Scenarios: + // 1. + if !activeService[string(svc.UID)] { + log.Debugf("(svcs) [%s] has been added/modified with addresses [%s]", svc.Name, fetchServiceAddresses(svc)) + + wg.Add(1) + activeServiceLoadBalancer[string(svc.UID)], activeServiceLoadBalancerCancel[string(svc.UID)] = context.WithCancel(context.TODO()) + // Background the services election + // EnableServicesElection enabled + // watchEndpoint will do a ServicesElection by Service and understands local endpoints + // + // EnableRoutingTable enabled and EnableLeaderElection disabled + // watchEndpoint will also not do a leaderElection by service. 
+ if sm.config.EnableServicesElection || + ((sm.config.EnableRoutingTable || sm.config.EnableBGP) && (!sm.config.EnableLeaderElection && !sm.config.EnableServicesElection)) { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + // Start an endpoint watcher if we're not watching it already + if !watchedService[string(svc.UID)] { + // background the endpoint watcher + go func() { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeLocal { + // Add Endpoint or EndpointSlices watcher + wg.Add(1) + if !sm.config.EnableEndpointSlices { + err = sm.watchEndpoint(activeServiceLoadBalancer[string(svc.UID)], id, svc, &wg) + if err != nil { + log.Error(err) + } + } else { + err = sm.watchEndpointSlices(activeServiceLoadBalancer[string(svc.UID)], id, svc, &wg) + if err != nil { + log.Error(err) + } + } + wg.Done() + } + }() + + if (sm.config.EnableRoutingTable || sm.config.EnableBGP) && (!sm.config.EnableLeaderElection && !sm.config.EnableServicesElection) { + wg.Add(1) + go func() { + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + }() + } + // We're now watching this service + watchedService[string(svc.UID)] = true + } + } else if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && (!sm.config.EnableLeaderElection && !sm.config.EnableServicesElection) { + go func() { + if svc.Spec.ExternalTrafficPolicy == v1.ServiceExternalTrafficPolicyTypeCluster { + // Add Endpoint watcher + wg.Add(1) + if !sm.config.EnableEndpointSlices { + err = sm.watchEndpoint(activeServiceLoadBalancer[string(svc.UID)], id, svc, &wg) + if err != nil { + log.Error(err) + } + } else { + err = sm.watchEndpointSlices(activeServiceLoadBalancer[string(svc.UID)], id, svc, &wg) + if err != nil { + log.Error(err) + } + } + wg.Done() + } + }() + + wg.Add(1) + go func() { + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + }() + } else { + // Increment the waitGroup before the service Func is called (Done is completed in there) + wg.Add(1) + go func() { + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + }() + } + activeService[string(svc.UID)] = true + } else { + // Increment the waitGroup before the service Func is called (Done is completed in there) + wg.Add(1) + err = serviceFunc(activeServiceLoadBalancer[string(svc.UID)], svc, &wg) + if err != nil { + log.Error(err) + } + wg.Done() + } + } + case watch.Deleted: + svc, ok := event.Object.(*v1.Service) + if !ok { + return fmt.Errorf("unable to parse Kubernetes services from API watcher") + } + if activeService[string(svc.UID)] { + + // We only care about LoadBalancer services + if svc.Spec.Type != v1.ServiceTypeLoadBalancer { + break + } + + // We can ignore this service + if svc.Annotations["kube-vip.io/ignore"] == "true" { + log.Infof("(svcs) [%s] has an ignore annotation for kube-vip", svc.Name) + break + } + + // If no leader election is enabled, delete routes here + if !sm.config.EnableLeaderElection && !sm.config.EnableServicesElection && + sm.config.EnableRoutingTable && configuredLocalRoutes[string(svc.UID)] { + configuredLocalRoutes[string(svc.UID)] = false + sm.clearRoutes(svc) + } + + // If this is an active service then and additional leaderElection will handle stopping + err := sm.deleteService(string(svc.UID)) + if err != nil { + log.Error(err) + } + + // Calls the cancel function of the 
context + if activeServiceLoadBalancerCancel[string(svc.UID)] != nil { + activeServiceLoadBalancerCancel[string(svc.UID)]() + } + activeService[string(svc.UID)] = false + watchedService[string(svc.UID)] = false + } + + if (sm.config.EnableBGP || sm.config.EnableRoutingTable) && sm.config.EnableLeaderElection && !sm.config.EnableServicesElection { + if sm.config.EnableBGP { + instance := sm.findServiceInstance(svc) + for _, vip := range instance.vipConfigs { + vipCidr := fmt.Sprintf("%s/%s", vip.VIP, vip.VIPCIDR) + err = sm.bgpServer.DelHost(vipCidr) + if err != nil { + log.Errorf("error deleting host %s: %s", vipCidr, err.Error()) + } + } + } else { + sm.clearRoutes(svc) + } + } + + log.Infof("(svcs) [%s/%s] has been deleted", svc.Namespace, svc.Name) + case watch.Bookmark: + // Un-used + case watch.Error: + log.Error("Error attempting to watch Kubernetes services") + + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + log.Errorf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) + } + + status := statusErr.ErrStatus + log.Errorf("services -> %v", status) + default: + } + } + close(exitFunction) + log.Warnln("Stopping watching services for type: LoadBalancer in all namespaces") + return nil +} diff --git a/pkg/manager/watcher_test.go b/pkg/manager/watcher_test.go new file mode 100644 index 00000000..69ace11b --- /dev/null +++ b/pkg/manager/watcher_test.go @@ -0,0 +1,145 @@ +package manager + +import ( + "reflect" + "testing" + + "github.com/kube-vip/kube-vip/pkg/bgp" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestParseBgpAnnotations(t *testing.T) { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Annotations: map[string]string{}}, + } + + bgpConfigBase := bgp.Config{ + HoldTime: 15, + KeepaliveInterval: 5, + } + _, _, err := parseBgpAnnotations(bgpConfigBase, node, "bgp") + if err == nil { + t.Fatal("Parsing BGP annotations should return an error when no annotations exist") + } + + node.Annotations = map[string]string{ + "bgp/node-asn": "65000", + "bgp/peer-asn": "64000", + "bgp/src-ip": "10.0.0.254", + } + + bgpConfig, bgpPeer, err := parseBgpAnnotations(bgpConfigBase, node, "bgp") + if err != nil { + t.Fatal("Parsing BGP annotations should return nil when minimum config is met") + } + + assert.Equal(t, uint32(65000), bgpConfig.AS, "bgpConfig.AS parsed incorrectly") + assert.Equal(t, uint32(64000), bgpPeer.AS, "bgpPeer.AS parsed incorrectly") + assert.Equal(t, "10.0.0.254", bgpConfig.RouterID, "bgpConfig.RouterID parsed incorrectly") + assert.EqualValues(t, 15, bgpConfig.HoldTime, "base bgpConfig.HoldTime should not be overwritten") + assert.EqualValues(t, 5, bgpConfig.KeepaliveInterval, "base bgpConfig.KeepaliveInterval should not be overwritten") + + node.Annotations = map[string]string{ + "bgp/node-asn": "65000", + "bgp/peer-asn": "64000", + "bgp/src-ip": "10.0.0.254", + "bgp/peer-ip": "10.0.0.1,10.0.0.2,10.0.0.3", + "bgp/bgp-pass": "cGFzc3dvcmQ=", // password + } + + bgpConfig, bgpPeer, err = parseBgpAnnotations(bgpConfigBase, node, "bgp") + if err != nil { + t.Fatal("Parsing BGP annotations should return nil when minimum config is met") + } + + bgpPeers := []bgp.Peer{ + {Address: "10.0.0.1", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.2", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.3", 
AS: uint32(64000), Password: "password"}, + } + assert.Equal(t, bgpPeers, bgpConfig.Peers, "bgpConfig.Peers parsed incorrectly") + assert.Equal(t, "10.0.0.3", bgpPeer.Address, "bgpPeer.Address parsed incorrectly") + assert.Equal(t, "password", bgpPeer.Password, "bgpPeer.Password parsed incorrectly") + assert.EqualValues(t, 15, bgpConfig.HoldTime, "base bgpConfig.HoldTime should not be overwritten") + assert.EqualValues(t, 5, bgpConfig.KeepaliveInterval, "base bgpConfig.KeepaliveInterval should not be overwritten") +} + +// Node, or local, ASN, default annotation metal.equinix.com/bgp-peers-{{n}}-node-asn +// Peer ASN, default annotation metal.equinix.com/bgp-peers-{{n}}-peer-asn +// Peer IP, default annotation metal.equinix.com/bgp-peers-{{n}}-peer-ip +// Source IP to use when communicating with peer, default annotation metal.equinix.com/bgp-peers-{{n}}-src-ip +// BGP password for peer, default annotation metal.equinix.com/bgp-peers-{{n}}-bgp-pass + +func TestParseNewBgpAnnotations(t *testing.T) { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Annotations: map[string]string{}}, + } + + bgpConfigBase := bgp.Config{ + HoldTime: 15, + KeepaliveInterval: 5, + } + _, _, err := parseBgpAnnotations(bgpConfigBase, node, "bgp") + if err == nil { + t.Fatal("Parsing BGP annotations should return an error when no annotations exist") + } + + node.Annotations = map[string]string{ + "bgp/bgp-peers-0-node-asn": "65000", + "bgp/bgp-peers-0-peer-asn": "64000", + "bgp/bgp-peers-0-peer-ip": "10.0.0.1,10.0.0.2,10.0.0.3", + "bgp/bgp-peers-0-src-ip": "10.0.0.254", + "bgp/bgp-peers-0-bgp-pass": "cGFzc3dvcmQ=", // password + } + + bgpConfig, bgpPeer, err := parseBgpAnnotations(bgpConfigBase, node, "bgp") + if err != nil { + t.Fatalf("Parsing BGP annotations should return nil when minimum config is met [%v]", err) + } + + bgpPeers := []bgp.Peer{ + {Address: "10.0.0.1", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.2", AS: uint32(64000), Password: "password"}, + {Address: "10.0.0.3", AS: uint32(64000), Password: "password"}, + } + assert.Equal(t, bgpPeers, bgpConfig.Peers, "bgpConfig.Peers parsed incorrectly") + assert.Equal(t, "10.0.0.254", bgpConfig.SourceIP, "bgpConfig.SourceIP parsed incorrectly") + assert.Equal(t, "10.0.0.254", bgpConfig.RouterID, "bgpConfig.RouterID parsed incorrectly") + assert.Equal(t, "10.0.0.3", bgpPeer.Address, "bgpPeer.Address parsed incorrectly") + assert.Equal(t, "password", bgpPeer.Password, "bgpPeer.Password parsed incorrectly") + assert.EqualValues(t, 15, bgpConfig.HoldTime, "base bgpConfig.HoldTime should not be overwritten") + assert.EqualValues(t, 5, bgpConfig.KeepaliveInterval, "base bgpConfig.KeepaliveInterval should not be overwritten") +} + +func Test_parseBgpAnnotations(t *testing.T) { + type args struct { + node *corev1.Node + prefix string + } + tests := []struct { + name string + args args + want bgp.Config + want1 bgp.Peer + wantErr bool + }{ + // TODO: Add test cases. 
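// Illustrative sketch (not part of the patch): a runnable distillation of what the tests
// above encode about the annotation formats: the peer-ip value is a comma-separated list
// and the bgp-pass value is base64 ("cGFzc3dvcmQ=" decodes to "password"). The bgp.Peer
// field names are taken from the assertions above; everything else here is invented for
// illustration.
package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"

	"github.com/kube-vip/kube-vip/pkg/bgp"
)

func main() {
	annotations := map[string]string{
		"bgp/peer-asn": "64000",
		"bgp/peer-ip":  "10.0.0.1,10.0.0.2,10.0.0.3",
		"bgp/bgp-pass": "cGFzc3dvcmQ=",
	}

	// Decode the shared BGP password once.
	pass, err := base64.StdEncoding.DecodeString(annotations["bgp/bgp-pass"])
	if err != nil {
		panic(err)
	}

	// Parse the peer ASN used for every peer in the list.
	asn, err := strconv.ParseUint(annotations["bgp/peer-asn"], 10, 32)
	if err != nil {
		panic(err)
	}

	// Build one bgp.Peer per address in the comma-separated list.
	var peers []bgp.Peer
	for _, ip := range strings.Split(annotations["bgp/peer-ip"], ",") {
		peers = append(peers, bgp.Peer{Address: ip, AS: uint32(asn), Password: string(pass)})
	}

	fmt.Printf("%+v\n", peers) // three peers sharing AS 64000 and the decoded password
}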
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, got1, err := parseBgpAnnotations(bgp.Config{}, tt.args.node, tt.args.prefix) + if (err != nil) != tt.wantErr { + t.Errorf("parseBgpAnnotations() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("parseBgpAnnotations() got = %v, want %v", got, tt.want) + } + if !reflect.DeepEqual(got1, tt.want1) { + t.Errorf("parseBgpAnnotations() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} diff --git a/pkg/packet/eip.go b/pkg/packet/eip.go deleted file mode 100644 index e3696476..00000000 --- a/pkg/packet/eip.go +++ /dev/null @@ -1,51 +0,0 @@ -package packet - -import ( - "fmt" - "strings" - - "github.com/packethost/packngo" - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" -) - -// AttachEIP will use the packet APIs to move an EIP and attach to a host -func AttachEIP(c *packngo.Client, k *kubevip.Config, hostname string) error { - - // Find our project - proj := findProject(k.PacketProject, c) - if proj == nil { - return fmt.Errorf("Unable to find Project [%s]", k.PacketProject) - } - - ips, _, _ := c.ProjectIPs.List(proj.ID, &packngo.ListOptions{}) - for _, ip := range ips { - - // Find the device id for our EIP - if ip.Address == k.VIP { - log.Infof("Found EIP ->%s ID -> %s\n", ip.Address, ip.ID) - // If attachements already exist then remove them - if len(ip.Assignments) != 0 { - hrefID := strings.Replace(ip.Assignments[0].Href, "/ips/", "", -1) - c.DeviceIPs.Unassign(hrefID) - } - } - } - - // Lookup this server through the packet API - thisDevice := findSelf(c, proj.ID) - if thisDevice == nil { - return fmt.Errorf("Unable to find local/this device in packet API") - } - - // Assign the EIP to this device - log.Infof("Assigning EIP to -> %s\n", thisDevice.Hostname) - _, _, err := c.DeviceIPs.Assign(thisDevice.ID, &packngo.AddressStruct{ - Address: k.VIP, - }) - if err != nil { - return err - } - - return nil -} diff --git a/pkg/packet/utils.go b/pkg/packet/utils.go deleted file mode 100644 index 1d624073..00000000 --- a/pkg/packet/utils.go +++ /dev/null @@ -1,36 +0,0 @@ -package packet - -import ( - "os" - - "github.com/packethost/packngo" - log "github.com/sirupsen/logrus" -) - -func findProject(project string, c *packngo.Client) *packngo.Project { - l := &packngo.ListOptions{Includes: []string{project}} - ps, _, err := c.Projects.List(l) - if err != nil { - log.Error(err) - } - for _, p := range ps { - - // Find our project - if p.Name == project { - return &p - } - } - return nil -} - -func findSelf(c *packngo.Client, projectID string) *packngo.Device { - // Go through devices - dev, _, _ := c.Devices.List(projectID, &packngo.ListOptions{}) - for _, d := range dev { - me, _ := os.Hostname() - if me == d.Hostname { - return &d - } - } - return nil -} diff --git a/pkg/service/manager.go b/pkg/service/manager.go deleted file mode 100644 index d9563672..00000000 --- a/pkg/service/manager.go +++ /dev/null @@ -1,289 +0,0 @@ -package service - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/signal" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/davecgh/go-spew/spew" - dhclient "github.com/digineo/go-dhclient" - "github.com/plunder-app/kube-vip/pkg/cluster" - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/rest" - 
"k8s.io/client-go/tools/leaderelection/resourcelock" - watchtools "k8s.io/client-go/tools/watch" - - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/client-go/tools/leaderelection" -) - -const plunderLock = "plunder-lock" - -// OutSideCluster allows the controller to be started using a local kubeConfig for testing -var OutSideCluster bool - -// EnableArp - determines the use of ARP broadcasts -var EnableArp bool - -// Interface - determines the interface that all Loadbalancers/macvlans will bind too -var Interface string - -type plndrServices struct { - Services []service `json:"services"` -} - -type dhcpService struct { - // dhcpClient (used DHCP for the vip) - dhcpClient *dhclient.Client - dhcpInterface string -} - -type serviceInstance struct { - // Virtual IP / Load Balancer configuration - vipConfig kubevip.Config - // Kubernetes service mapping - service service - // cluster instance - cluster cluster.Cluster - - // Custom settings - dhcp *dhcpService -} - -// TODO - call from a package (duplicated struct in the cloud-provider code) -type service struct { - Vip string `json:"vip"` - Port int `json:"port"` - UID string `json:"uid"` - Type string `json:"type"` - - ServiceName string `json:"serviceName"` -} - -// Manager degines the manager of the load-balancing services -type Manager struct { - clientSet *kubernetes.Clientset - configMap string - // Keeps track of all running instances - serviceInstances []serviceInstance -} - -// NewManager will create a new managing object -func NewManager(configMap string) (*Manager, error) { - var clientset *kubernetes.Clientset - if OutSideCluster == false { - // This will attempt to load the configuration when running within a POD - cfg, err := rest.InClusterConfig() - if err != nil { - return nil, fmt.Errorf("error creating kubernetes client config: %s", err.Error()) - } - clientset, err = kubernetes.NewForConfig(cfg) - - if err != nil { - return nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) - } - // use the current context in kubeconfig - } else { - config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config")) - if err != nil { - panic(err.Error()) - } - clientset, err = kubernetes.NewForConfig(config) - - if err != nil { - return nil, fmt.Errorf("error creating kubernetes client: %s", err.Error()) - } - } - - return &Manager{ - clientSet: clientset, - configMap: configMap, - }, nil -} - -// Start will begin the ConfigMap watcher -func (sm *Manager) Start() error { - - ns, err := returnNameSpace() - if err != nil { - return err - } - - id, err := os.Hostname() - if err != nil { - return err - } - - // Build a options structure to defined what we're looking for - listOptions := metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", sm.configMap), - } - log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", ns, plunderLock, id) - // we use the Lease lock type since edits to Leases are less common - // and fewer objects in the cluster watch "all Leases". 
- lock := &resourcelock.LeaseLock{ - LeaseMeta: metav1.ObjectMeta{ - Name: plunderLock, - Namespace: ns, - }, - Client: sm.clientSet.CoordinationV1(), - LockConfig: resourcelock.ResourceLockConfig{ - Identity: id, - }, - } - - // use a Go context so we can tell the leaderelection code when we - // want to step down - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // listen for interrupts or the Linux SIGTERM signal and cancel - // our context, which the leader election code will observe and - // step down - signalChan := make(chan os.Signal, 1) - // Add Notification for Userland interrupt - signal.Notify(signalChan, syscall.SIGINT) - - // Add Notification for SIGTERM (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGTERM) - - // Add Notification for SIGKILL (sent from Kubernetes) - signal.Notify(signalChan, syscall.SIGKILL) - go func() { - <-signalChan - log.Info("Received termination, signaling shutdown") - // Cancel the context, which will in turn cancel the leadership - cancel() - }() - - // start the leader election code loop - leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{ - Lock: lock, - // IMPORTANT: you MUST ensure that any code you have that - // is protected by the lease must terminate **before** - // you call cancel. Otherwise, you could have a background - // loop still running and another process could - // get elected before your background loop finished, violating - // the stated goal of the lease. - ReleaseOnCancel: true, - LeaseDuration: 10 * time.Second, - RenewDeadline: 5 * time.Second, - RetryPeriod: 1 * time.Second, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(ctx context.Context) { - // we're notified when we start - - // Watch function - // Use a restartable watcher, as this should help in the event of etcd or timeout issues - rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return sm.clientSet.CoreV1().ConfigMaps(ns).Watch(context.TODO(), listOptions) - }, - }) - - if err != nil { - log.Errorf("error creating watcher: %s", err.Error()) - ctx.Done() - } - - ch := rw.ResultChan() - defer rw.Stop() - log.Infof("Beginning watching Kubernetes configMap [%s]", sm.configMap) - - var svcs plndrServices - //signalChan := make(chan os.Signal, 1) - //signal.Notify(signalChan, os.Interrupt) - go func() { - for event := range ch { - - // We need to inspect the event and get ResourceVersion out of it - switch event.Type { - case watch.Added, watch.Modified: - log.Debugf("ConfigMap [%s] has been Created or modified", sm.configMap) - cm, ok := event.Object.(*v1.ConfigMap) - if !ok { - log.Errorf("Unable to parse ConfigMap from watcher") - break - } - data := cm.Data["plndr-services"] - json.Unmarshal([]byte(data), &svcs) - log.Debugf("Found %d services defined in ConfigMap", len(svcs.Services)) - - err = sm.syncServices(&svcs) - if err != nil { - log.Errorf("%v", err) - } - case watch.Deleted: - log.Debugf("ConfigMap [%s] has been Deleted", sm.configMap) - - case watch.Bookmark: - // Un-used - case watch.Error: - log.Infoln("err") - - // This round trip allows us to handle unstructured status - errObject := apierrors.FromObject(event.Object) - statusErr, ok := errObject.(*apierrors.StatusError) - if !ok { - log.Fatalf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) - // Retry unknown errors - //return false, 0 - } - - status := statusErr.ErrStatus - log.Errorf("%v", status) 
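// Illustrative sketch (not part of the patch): the watch.Error handling above (and the
// identical handling kept in the new servicesWatcher) round-trips the event object through
// apierrors.FromObject to recover a structured metav1.Status. The helper below isolates
// that pattern; the package and function names are invented for illustration.
package watcherexample

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
)

// statusFromErrorEvent converts a watch.Error event into a metav1.Status where possible.
func statusFromErrorEvent(event watch.Event) (metav1.Status, error) {
	errObject := apierrors.FromObject(event.Object)
	statusErr, ok := errObject.(*apierrors.StatusError)
	if !ok {
		// Not a structured status; report the raw object instead.
		return metav1.Status{}, fmt.Errorf("received an error which is not *metav1.Status but %#+v", event.Object)
	}
	return statusErr.ErrStatus, nil
}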
- - default: - } - } - }() - - <-signalChan - }, - OnStoppedLeading: func() { - // we can do cleanup here - log.Infof("leader lost: %s", id) - for x := range sm.serviceInstances { - sm.serviceInstances[x].cluster.Stop() - } - }, - OnNewLeader: func(identity string) { - // we're notified when new leader elected - if identity == id { - // I just got the lock - return - } - log.Infof("new leader elected: %s", identity) - }, - }, - }) - - //<-signalChan - log.Infof("Shutting down Kube-Vip") - - return nil -} - -func returnNameSpace() (string, error) { - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns, nil - } - return "", err - } - return "", fmt.Errorf("Unable to find Namespace") -} diff --git a/pkg/service/services.go b/pkg/service/services.go deleted file mode 100644 index 5e8f193f..00000000 --- a/pkg/service/services.go +++ /dev/null @@ -1,237 +0,0 @@ -package service - -import ( - "context" - "fmt" - "net" - - dhclient "github.com/digineo/go-dhclient" - "github.com/plunder-app/kube-vip/pkg/cluster" - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - "github.com/vishvananda/netlink" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (sm *Manager) stopService(uid string) error { - found := false - for x := range sm.serviceInstances { - if sm.serviceInstances[x].service.UID == uid { - found = true - sm.serviceInstances[x].cluster.Stop() - } - } - if found == false { - return fmt.Errorf("Unable to find/stop service [%s]", uid) - } - return nil -} - -func (sm *Manager) deleteService(uid string) error { - var updatedInstances []serviceInstance - found := false - for x := range sm.serviceInstances { - // Add the running services to the new array - if sm.serviceInstances[x].service.UID != uid { - updatedInstances = append(updatedInstances, sm.serviceInstances[x]) - } else { - // Flip the found when we match - found = true - if sm.serviceInstances[x].dhcp != nil { - sm.serviceInstances[x].dhcp.dhcpClient.Stop() - macvlan, err := netlink.LinkByName(sm.serviceInstances[x].dhcp.dhcpInterface) - if err != nil { - return fmt.Errorf("Error finding VIP Interface, for building DHCP Link : %v", err) - } - netlink.LinkDel(macvlan) - } - } - } - // If we've been through all services and not found the correct one then error - if found == false { - return fmt.Errorf("Unable to find/stop service [%s]", uid) - } - - // Update the service array - sm.serviceInstances = updatedInstances - - log.Debugf("Removed [%s] from manager, [%d] services remain", uid, len(sm.serviceInstances)) - - return nil -} - -func (sm *Manager) syncServices(s *plndrServices) error { - log.Debugf("[STARTING] Service Sync") - // Iterate through the synchronising services - for x := range s.Services { - foundInstance := false - for y := range sm.serviceInstances { - if s.Services[x].UID == sm.serviceInstances[y].service.UID { - // We have found this instance in the manager and we can update it - foundInstance = true - } - } - - // Generate new Virtual IP configuration - newVip := kubevip.Config{ - VIP: s.Services[x].Vip, - Interface: Interface, - SingleNode: true, - GratuitousARP: EnableArp, - } - - // This instance wasn't found, we need to add it to the manager - if foundInstance == false { - // Create new service - var newService serviceInstance - - // If this was purposely created with the address 0.0.0.0 then we will create a macvlan on the main interface and try DHCP - if 
s.Services[x].Vip == "0.0.0.0" { - - parent, err := netlink.LinkByName(Interface) - if err != nil { - return fmt.Errorf("Error finding VIP Interface, for building DHCP Link : %v", err) - } - - // Create macvlan - - // Generate name from UID - interfaceName := fmt.Sprintf("vip-%s", s.Services[x].UID[0:8]) - - mac := &netlink.Macvlan{LinkAttrs: netlink.LinkAttrs{Name: interfaceName, ParentIndex: parent.Attrs().Index}, Mode: netlink.MACVLAN_MODE_BRIDGE} - err = netlink.LinkAdd(mac) - if err != nil { - return fmt.Errorf("Could not add %s: %v", interfaceName, err) - } - - err = netlink.LinkSetUp(mac) - if err != nil { - return fmt.Errorf("Could not bring up interface [%s] : %v", interfaceName, err) - } - - iface, err := net.InterfaceByName(interfaceName) - if err != nil { - return fmt.Errorf("Error finding new DHCP interface by name [%v]", err) - } - - client := dhclient.Client{ - Iface: iface, - OnBound: func(lease *dhclient.Lease) { - - // Set VIP to Address from lease - newVip.VIP = lease.FixedAddress.String() - log.Infof("New VIP [%s] for [%s/%s] ", newVip.VIP, s.Services[x].ServiceName, s.Services[x].UID) - - // Generate Load Balancer configu - newLB := kubevip.LoadBalancer{ - Name: fmt.Sprintf("%s-load-balancer", s.Services[x].ServiceName), - Port: s.Services[x].Port, - Type: s.Services[x].Type, - BindToVip: true, - } - - // Add Load Balancer Configuration - newVip.LoadBalancers = append(newVip.LoadBalancers, newLB) - - // Create Add configuration to the new service - newService.vipConfig = newVip - newService.service = s.Services[x] - - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", newService.service.ServiceName, newService.service.UID) - //return err - } - err = c.StartSingleNode(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", newService.service.ServiceName, newService.service.UID) - //return err - } - newService.cluster = *c - - // Begin watching endpoints for this service - go sm.newWatcher(&newService) - - // Add new service to manager configuration - sm.serviceInstances = append(sm.serviceInstances, newService) - - // Update the service - // listOptions := metav1.ListOptions{ - // FieldSelector: fmt.Sprintf("metadata.uid=%s", newService.service.UID), - // } - ns, err := returnNameSpace() - if err != nil { - log.Errorf("Error finding Namespace") - return - } - dhcpService, err := sm.clientSet.CoreV1().Services(ns).Get(context.TODO(), newService.service.ServiceName, metav1.GetOptions{}) - if err != nil { - log.Errorf("Error finding Service [%s] : %v", newService.service.ServiceName, err) - return - } - dhcpService.Spec.LoadBalancerIP = newVip.VIP - _, err = sm.clientSet.CoreV1().Services(ns).Update(context.TODO(), dhcpService, metav1.UpdateOptions{}) - if err != nil { - log.Errorf("Error updating Service [%s] : %v", newService.service.ServiceName, err) - return - } - }, - } - - newService.dhcp = &dhcpService{ - dhcpClient: &client, - dhcpInterface: interfaceName, - } - - // Start the DHCP Client - newService.dhcp.dhcpClient.Start() - - // Change the interface name to our new DHCP macvlan interface - newVip.Interface = interfaceName - log.Infof("DHCP Interface and Client is up and active [%s]", interfaceName) - return nil - - } - - log.Infof("New VIP [%s] for [%s/%s] ", s.Services[x].Vip, s.Services[x].ServiceName, s.Services[x].UID) - - // Generate Load Balancer configu - newLB := kubevip.LoadBalancer{ - Name: 
fmt.Sprintf("%s-load-balancer", s.Services[x].ServiceName), - Port: s.Services[x].Port, - Type: s.Services[x].Type, - BindToVip: true, - } - - // Add Load Balancer Configuration - newVip.LoadBalancers = append(newVip.LoadBalancers, newLB) - - // Create Add configuration to the new service - newService.vipConfig = newVip - newService.service = s.Services[x] - - // TODO - start VIP - c, err := cluster.InitCluster(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", s.Services[x].ServiceName, s.Services[x].UID) - return err - } - err = c.StartSingleNode(&newService.vipConfig, false) - if err != nil { - log.Errorf("Failed to add Service [%s] / [%s]", s.Services[x].ServiceName, s.Services[x].UID) - return err - } - newService.cluster = *c - - // Begin watching endpoints for this service - go sm.newWatcher(&newService) - - // Add new service to manager configuration - sm.serviceInstances = append(sm.serviceInstances, newService) - } - } - log.Debugf("[COMPLETE] Service Sync") - - return nil -} diff --git a/pkg/service/watcher.go b/pkg/service/watcher.go deleted file mode 100644 index a15cd29c..00000000 --- a/pkg/service/watcher.go +++ /dev/null @@ -1,116 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/davecgh/go-spew/spew" - "github.com/plunder-app/kube-vip/pkg/kubevip" - log "github.com/sirupsen/logrus" - "golang.org/x/net/context" - v1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/tools/cache" - watchtools "k8s.io/client-go/tools/watch" -) - -// This file handles the watching of a services endpoints and updates a load balancers endpoint configurations accordingly - -func (sm *Manager) newWatcher(s *serviceInstance) error { - // Build a options structure to defined what we're looking for - listOptions := metav1.ListOptions{ - FieldSelector: fmt.Sprintf("metadata.name=%s", s.service.ServiceName), - } - ns, err := returnNameSpace() - if err != nil { - return err - } - // Watch function - // Use a restartable watcher, as this should help in the event of etcd or timeout issues - rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - return sm.clientSet.CoreV1().Endpoints(ns).Watch(context.TODO(), listOptions) - }, - }) - if err != nil { - return fmt.Errorf("error creating watcher: %s", err.Error()) - } - - ch := rw.ResultChan() - defer rw.Stop() - log.Infof("Beginning watching Kubernetes Endpoints for service [%s]", s.service.ServiceName) - - for event := range ch { - - // We need to inspect the event and get ResourceVersion out of it - switch event.Type { - case watch.Added, watch.Modified: - log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) - ep, ok := event.Object.(*v1.Endpoints) - if !ok { - return fmt.Errorf("Unable to parse Endpoints from watcher") - } - s.vipConfig.LoadBalancers[0].Backends = rebuildEndpoints(*ep) - - log.Debugf("Load-Balancer updated with [%d] backends", len(s.vipConfig.LoadBalancers[0].Backends)) - case watch.Deleted: - log.Debugf("Endpoints for service [%s] have been Deleted", s.service.ServiceName) - log.Infof("Service [%s] has been deleted, stopping VIP", s.service.ServiceName) - // Stopping the service from running - sm.stopService(s.service.UID) - // Remove this service from the list of services we manage - sm.deleteService(s.service.UID) - return nil - case 
watch.Bookmark: - // Un-used - case watch.Error: - log.Infoln("err") - - // This round trip allows us to handle unstructured status - errObject := apierrors.FromObject(event.Object) - statusErr, ok := errObject.(*apierrors.StatusError) - if !ok { - log.Fatalf(spew.Sprintf("Received an error which is not *metav1.Status but %#+v", event.Object)) - // Retry unknown errors - //return false, 0 - } - - status := statusErr.ErrStatus - log.Errorf("%v", status) - default: - } - } - return nil -} - -func rebuildEndpoints(eps v1.Endpoints) []kubevip.BackEnd { - var addresses []string - var ports []int32 - - for x := range eps.Subsets { - // Loop over addresses - for y := range eps.Subsets[x].Addresses { - addresses = append(addresses, eps.Subsets[x].Addresses[y].IP) - } - for y := range eps.Subsets[x].Ports { - ports = append(ports, eps.Subsets[x].Ports[y].Port) - } - } - var newBackend []kubevip.BackEnd - - // Build endpoints - for x := range addresses { - for y := range ports { - // Print out Backends if debug logging is enabled - if log.GetLevel() == log.DebugLevel { - fmt.Printf("-> Address: %s:%d \n", addresses[x], ports[y]) - } - newBackend = append(newBackend, kubevip.BackEnd{ - Address: addresses[x], - Port: int(ports[y]), - }) - } - } - return newBackend -} diff --git a/pkg/vip/address.go b/pkg/vip/address.go index f4a716e1..93b2574b 100644 --- a/pkg/vip/address.go +++ b/pkg/vip/address.go @@ -1,79 +1,323 @@ package vip import ( + "fmt" + "os" + "strconv" + "strings" "sync" "github.com/pkg/errors" + log "github.com/sirupsen/logrus" "github.com/vishvananda/netlink" + "golang.org/x/sys/unix" + v1 "k8s.io/api/core/v1" + + "github.com/kube-vip/kube-vip/pkg/iptables" ) const ( - defaultValidLft = 60 + defaultValidLft = 60 + iptablesComment = "%s kube-vip load balancer IP" + ignoreServiceSecurityAnnotation = "kube-vip.io/ignore-service-security" ) // Network is an interface that enable managing operations for a given IP type Network interface { AddIP() error + AddRoute() error DeleteIP() error + DeleteRoute() error IsSet() (bool, error) IP() string SetIP(ip string) error + SetServicePorts(service *v1.Service) Interface() string + IsDADFAIL() bool + IsDNS() bool + IsDDNS() bool + DDNSHostName() string + DNSName() string } // network - This allows network configuration type network struct { mu sync.Mutex - address *netlink.Addr - link netlink.Link - isDNS bool -} + address *netlink.Addr + link netlink.Link + ports []v1.ServicePort + serviceName string + ignoreSecurity bool -// NewConfig will attempt to provide an interface to the kernel network configuration -func NewConfig(address string, iface string) (Network, error) { - result := &network{} + dnsName string + isDDNS bool + + routeTable int + routingTableType int +} - link, err := netlink.LinkByName(iface) +func netlinkParse(addr string) (*netlink.Addr, error) { + mask, err := GetFullMask(addr) if err != nil { - return result, errors.Wrapf(err, "could not get link for interface '%s'", iface) + return nil, err } - result.link = link + return netlink.ParseAddr(addr + mask) +} + +// NewConfig will attempt to provide an interface to the kernel network configuration +func NewConfig(address string, iface string, subnet string, isDDNS bool, tableID int, tableType int, dnsMode string) ([]Network, error) { + networks := []Network{} if IsIP(address) { - result.address, err = netlink.ParseAddr(address + "/32") + result := &network{} + + link, err := netlink.LinkByName(iface) if err != nil { - return result, errors.Wrapf(err, "could not parse address '%s'", 
address) + return networks, errors.Wrapf(err, "could not get link for interface '%s'", iface) + } + + result.link = link + result.routeTable = tableID + result.routingTableType = tableType + + // Check if the subnet needs overriding + if subnet != "" { + result.address, err = netlink.ParseAddr(address + subnet) + if err != nil { + return networks, errors.Wrapf(err, "could not parse address '%s'", address) + } + } else { + result.address, err = netlinkParse(address) + if err != nil { + return networks, errors.Wrapf(err, "could not parse address '%s'", address) + } + } + // Ensure we don't have a global address on loopback + if iface == "lo" { + result.address.Scope = unix.RT_SCOPE_HOST + } + networks = append(networks, result) + } else { + // try to resolve the address + ips, err := LookupHost(address, dnsMode) + if err != nil { + // return early for ddns if no IP is allocated for the domain + // when leader starts, should do get IP from DHCP for the domain + if isDDNS { + return networks, nil + } + return nil, err + } + + for _, ip := range ips { + + result := &network{} + + link, err := netlink.LinkByName(iface) + if err != nil { + return networks, errors.Wrapf(err, "could not get link for interface '%s'", iface) + } + + result.link = link + result.routeTable = tableID + result.routingTableType = tableType + + // address is DNS + result.isDDNS = isDDNS + result.dnsName = address + + // we're able to resolve store this as the initial IP + if result.address, err = netlinkParse(ip); err != nil { + return networks, err + } + // set ValidLft so that the VIP expires if the DNS entry is updated, otherwise it'll be refreshed by the DNS prober + result.address.ValidLft = defaultValidLft + + networks = append(networks, result) } - return result, nil - } - // try to resolve the address - ip, err := lookupHost(address) - result.isDNS = err == nil - if err != nil { - return nil, err } - // we're able to resolve store this as the initial IP - if result.address, err = netlink.ParseAddr(ip + "/32"); err != nil { - return result, err + return networks, nil +} + +// AddRoute - Add an IP address to a route table +func (configurator *network) AddRoute() error { + routeScope := netlink.SCOPE_UNIVERSE + if configurator.routingTableType == unix.RTN_LOCAL { + routeScope = netlink.SCOPE_LINK + } + route := &netlink.Route{ + Scope: routeScope, + Dst: configurator.address.IPNet, + LinkIndex: configurator.link.Attrs().Index, + Table: configurator.routeTable, + Type: configurator.routingTableType, } - // set ValidLft so that the VIP expires if the DNS entry is updated, otherwise it'll be refreshed by the DNS prober - result.address.ValidLft = defaultValidLft + return netlink.RouteAdd(route) +} - return result, err +// DeleteRoute - Delete an IP address from a route table +func (configurator *network) DeleteRoute() error { + routeScope := netlink.SCOPE_UNIVERSE + if configurator.routingTableType == unix.RTN_LOCAL { + routeScope = netlink.SCOPE_LINK + } + route := &netlink.Route{ + Scope: routeScope, + Dst: configurator.address.IPNet, + LinkIndex: configurator.link.Attrs().Index, + Table: configurator.routeTable, + Type: configurator.routingTableType, + } + return netlink.RouteDel(route) } -//AddIP - Add an IP address to the interface +// AddIP - Add an IP address to the interface func (configurator *network) AddIP() error { if err := netlink.AddrReplace(configurator.link, configurator.address); err != nil { return errors.Wrap(err, "could not add ip") } + + if os.Getenv("enable_service_security") == "true" && 
!configurator.ignoreSecurity { + if err := configurator.addIptablesRulesToLimitTrafficPorts(); err != nil { + return errors.Wrap(err, "could not add iptables rules to limit traffic ports") + } + } + + return nil +} + +func (configurator *network) addIptablesRulesToLimitTrafficPorts() error { + ipt, err := iptables.New() + if err != nil { + return errors.Wrap(err, "could not create iptables client") + } + + vip := configurator.address.IP.String() + comment := fmt.Sprintf(iptablesComment, configurator.serviceName) + if err := insertCommonIPTablesRules(ipt, vip, comment); err != nil { + return fmt.Errorf("could not add common iptables rules: %w", err) + } + log.Debugf("add iptables rules, vip: %s, ports: %+v", vip, configurator.ports) + if err := configurator.insertIPTablesRulesForServicePorts(ipt, vip, comment); err != nil { + return fmt.Errorf("could not add iptables rules for service ports: %v", err) + } + return nil } -//DeleteIP - Remove an IP address from the interface +func (configurator *network) insertIPTablesRulesForServicePorts(ipt *iptables.IPTables, vip, comment string) error { + isPortsRuleExisting := make([]bool, len(configurator.ports)) + + // delete rules of ports that are not in the service + rules, err := ipt.List(iptables.TableFilter, iptables.ChainInput) + if err != nil { + return fmt.Errorf("could not list iptables rules: %w", err) + } + for _, rule := range rules { + // only handle rules with kube-vip comment + if iptables.GetIPTablesRuleSpecification(rule, "--comment") != comment { + continue + } + // if the rule is not for the vip, delete it + if iptables.GetIPTablesRuleSpecification(rule, "-d") != vip { + if err := ipt.Delete(iptables.TableFilter, iptables.ChainInput, rule); err != nil { + return fmt.Errorf("could not delete iptables rule: %w", err) + } + } + + protocol := iptables.GetIPTablesRuleSpecification(rule, "-p") + port := iptables.GetIPTablesRuleSpecification(rule, "--dport") + // ignore DHCP client port + if protocol == string(v1.ProtocolUDP) && port == dhcpClientPort { + continue + } + // if the rule is for the vip, but its protocol and port are not in the service, delete it + toBeDeleted := true + for i, p := range configurator.ports { + if string(p.Protocol) == protocol && strconv.Itoa(int(p.Port)) == port { + // the rule is for the vip and its protocol and port are in the service, keep it and mark it as existing + toBeDeleted = false + isPortsRuleExisting[i] = true + } + } + if toBeDeleted { + if err := ipt.Delete(iptables.TableFilter, iptables.ChainInput, strings.Split(rule, "")...); err != nil { + return fmt.Errorf("could not delete iptables rule: %w", err) + } + } + } + // add rules of ports that are not existing + // iptables -A INPUT -d -p --dport -j ACCEPT -m comment β€”comment β€œ kube-vip load balancer IP” + for i, ok := range isPortsRuleExisting { + if !ok { + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 1, "-d", vip, "-p", + string(configurator.ports[i].Protocol), "--dport", strconv.Itoa(int(configurator.ports[i].Port)), + "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not add iptables rule to accept the traffic to VIP %s for allowed "+ + "port %d: %v", vip, configurator.ports[i].Port, err) + } + } + } + + return nil +} + +func insertCommonIPTablesRules(ipt *iptables.IPTables, vip, comment string) error { + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 1, "-d", vip, "-p", + string(v1.ProtocolUDP), "--dport", dhcpClientPort, "-m", "comment", 
"--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not add iptables rule to accept the traffic to VIP %s for DHCP client port: %w", vip, err) + } + // add rule to drop the traffic to VIP that is not allowed + // iptables -A INPUT -d -j DROP + if err := ipt.InsertUnique(iptables.TableFilter, iptables.ChainInput, 2, "-d", vip, "-m", + "comment", "--comment", comment, "-j", "DROP"); err != nil { + return fmt.Errorf("could not add iptables rule to drop the traffic to VIP %s: %v", vip, err) + } + return nil +} + +func deleteCommonIPTablesRules(ipt *iptables.IPTables, vip, comment string) error { + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-p", + string(v1.ProtocolUDP), "--dport", dhcpClientPort, "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not delete iptables rule to accept the traffic to VIP %s for DHCP client port: %w", vip, err) + } + // add rule to drop the traffic to VIP that is not allowed + // iptables -A INPUT -d -j DROP + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-m", "comment", + "--comment", comment, "-j", "DROP"); err != nil { + return fmt.Errorf("could not delete iptables rule to drop the traffic to VIP %s: %v", vip, err) + } + return nil +} + +func (configurator *network) removeIptablesRuleToLimitTrafficPorts() error { + ipt, err := iptables.New() + if err != nil { + return errors.Wrap(err, "could not create iptables client") + } + vip := configurator.address.IP.String() + comment := fmt.Sprintf(iptablesComment, configurator.serviceName) + + if err := deleteCommonIPTablesRules(ipt, vip, comment); err != nil { + return fmt.Errorf("could not delete common iptables rules: %w", err) + } + + log.Debugf("remove iptables rules, vip: %s, ports: %+v", vip, configurator.ports) + for _, port := range configurator.ports { + // iptables -D INPUT -d -p --dport -j ACCEPT + if err := ipt.DeleteIfExists(iptables.TableFilter, iptables.ChainInput, "-d", vip, "-p", string(port.Protocol), + "--dport", strconv.Itoa(int(port.Port)), "-m", "comment", "--comment", comment, "-j", "ACCEPT"); err != nil { + return fmt.Errorf("could not delete iptables rule to accept the traffic to VIP %s for allowed port %d: %v", vip, port.Port, err) + } + } + + return nil +} + +// DeleteIP - Remove an IP address from the interface func (configurator *network) DeleteIP() error { result, err := configurator.IsSet() if err != nil { @@ -89,13 +333,49 @@ func (configurator *network) DeleteIP() error { return errors.Wrap(err, "could not delete ip") } + if os.Getenv("enable_service_security") == "true" && !configurator.ignoreSecurity { + if err := configurator.removeIptablesRuleToLimitTrafficPorts(); err != nil { + return errors.Wrap(err, "could not remove iptables rules to limit traffic ports") + } + } + return nil } +// IsDADFAIL - Returns true if the address is IPv6 and has DADFAILED flag +func (configurator *network) IsDADFAIL() bool { + if configurator.address == nil || !IsIPv6(configurator.address.IP.String()) { + return false + } + + // Get all the address + addresses, err := netlink.AddrList(configurator.link, netlink.FAMILY_V6) + if err != nil { + return false + } + + // Find the VIP and check if it is DADFAILED + for _, address := range addresses { + if address.IP.Equal(configurator.address.IP) && addressHasDADFAILEDFlag(address) { + return true + } + } + + return false +} + +func addressHasDADFAILEDFlag(address netlink.Addr) bool { + return 
address.Flags&unix.IFA_F_DADFAILED != 0 +} + // IsSet - Check to see if VIP is set func (configurator *network) IsSet() (result bool, err error) { var addresses []netlink.Addr + if configurator.address == nil { + return false, nil + } + addresses, err = netlink.AddrList(configurator.link, 0) if err != nil { err = errors.Wrap(err, "could not list addresses") @@ -117,17 +397,28 @@ func (configurator *network) SetIP(ip string) error { configurator.mu.Lock() defer configurator.mu.Unlock() - addr, err := netlink.ParseAddr(ip + "/32") + addr, err := netlinkParse(ip) if err != nil { return err } - if configurator.address != nil && configurator.isDNS { + if configurator.address != nil && configurator.IsDNS() { addr.ValidLft = defaultValidLft } configurator.address = addr return nil } +// SetServicePorts updates the service ports from the service +// If you want to limit traffic to the VIP to only the service ports, add service ports to the network firstly. +func (configurator *network) SetServicePorts(service *v1.Service) { + configurator.mu.Lock() + defer configurator.mu.Unlock() + + configurator.ports = service.Spec.Ports + configurator.serviceName = service.Namespace + "/" + service.Name + configurator.ignoreSecurity = service.Annotations[ignoreServiceSecurityAnnotation] == "true" +} + // IP - return the IP Address func (configurator *network) IP() string { configurator.mu.Lock() @@ -136,7 +427,59 @@ func (configurator *network) IP() string { return configurator.address.IP.String() } +// DNSName return the configured dnsName when use DNS +func (configurator *network) DNSName() string { + return configurator.dnsName +} + +// IsDNS - when dnsName is configured +func (configurator *network) IsDNS() bool { + return configurator.dnsName != "" +} + +// IsDDNS - return true if use dynamic dns +func (configurator *network) IsDDNS() bool { + return configurator.isDDNS +} + +// DDNSHostName - return the hostname for dynamic dns +// when dDNSHostName is not empty, use DHCP to get IP for hostname: dDNSHostName +// it's expected that dynamic DNS should be configured so +// the fqdn for apiserver endpoint is dDNSHostName.{LocalDomain} +func (configurator *network) DDNSHostName() string { + return getHostName(configurator.dnsName) +} + // Interface - return the Interface name func (configurator *network) Interface() string { return configurator.link.Attrs().Name } + +func GarbageCollect(adapter, address string) (found bool, err error) { + + // Get adapter + link, err := netlink.LinkByName(adapter) + if err != nil { + return true, errors.Wrapf(err, "could not get link for interface '%s'", adapter) + } + + // Get addresses on adapter + addrs, err := netlink.AddrList(link, netlink.FAMILY_ALL) + if err != nil { + return false, err + } + + // Compare all addresses to new service address, and remove if needed + for _, existing := range addrs { + if existing.IP.String() == address { + // We've found the existing address + found = true + // linting issue + existing := existing + if err = netlink.AddrDel(link, &existing); err != nil { + return true, errors.Wrap(err, "could not delete ip") + } + } + } + return // Didn't find the address on the adapter +} diff --git a/pkg/vip/arp.go b/pkg/vip/arp.go index a8b1642f..7a8cee2f 100644 --- a/pkg/vip/arp.go +++ b/pkg/vip/arp.go @@ -1,6 +1,7 @@ +//go:build linux // +build linux -// These syscalls are only supported on Linux, so this uses a build directive during compilation. 
Other OS's will use the arp_unsupported.go and recieve an error +// These syscalls are only supported on Linux, so this uses a build directive during compilation. Other OS's will use the arp_unsupported.go and receive an error package vip @@ -11,8 +12,6 @@ import ( "net" "syscall" "unsafe" - - log "github.com/sirupsen/logrus" ) const ( @@ -136,7 +135,7 @@ func sendARP(iface *net.Interface, m *arpMessage) error { Halen: m.hardwareAddressLength, } target := ethernetBroadcast - for i := 0; i < len(target); i++ { + for i := 0; i < len(target); i++ { //nolint ll.Addr[i] = target[i] } @@ -167,7 +166,7 @@ func ARPSendGratuitous(address, ifaceName string) error { return fmt.Errorf("failed to parse address %s", ip) } - log.Infof("Broadcasting ARP update for %s (%s) via %s", address, iface.HardwareAddr, iface.Name) + // This is a debug message, enable debugging to ensure that the gratuitous arp is repeating m, err := gratuitousARP(ip, iface.HardwareAddr) if err != nil { return err diff --git a/pkg/vip/arp_unsupported.go b/pkg/vip/arp_unsupported.go index 5d732582..c6230b53 100644 --- a/pkg/vip/arp_unsupported.go +++ b/pkg/vip/arp_unsupported.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package vip diff --git a/pkg/vip/ddns.go b/pkg/vip/ddns.go new file mode 100644 index 00000000..7ff9f701 --- /dev/null +++ b/pkg/vip/ddns.go @@ -0,0 +1,83 @@ +package vip + +import ( + "context" + "net" + "time" + + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" +) + +// DDNSManager will start a dhclient to retrieve and keep the lease for the IP +// for the dDNSHostName +// will return the IP allocated +type DDNSManager interface { + Start() (string, error) +} + +type ddnsManager struct { + ctx context.Context + network Network +} + +// NewDDNSManager returns a newly created Dynamic DNS manager +func NewDDNSManager(ctx context.Context, network Network) DDNSManager { + return &ddnsManager{ + ctx: ctx, + network: network, + } +} + +// Start will start the dhcpclient routine to keep the lease +// and return the IP it got from DHCP +func (ddns *ddnsManager) Start() (string, error) { + interfaceName := ddns.network.Interface() + iface, err := net.InterfaceByName(interfaceName) + if err != nil { + return "", err + } + + client := NewDHCPClient(iface, false, "") + + client.WithHostName(ddns.network.DDNSHostName()) + + go client.Start() + + log.Info("waiting for ip from dhcp") + ip, timeout := "", time.After(1*time.Minute) + + select { + case <-ddns.ctx.Done(): + client.Stop() + return "", errors.New("context cancelled") + case <-timeout: + client.Stop() + return "", errors.New("failed to get IP from dhcp for ddns in 1 minutes") + case ip = <-client.IPChannel(): + log.Info("got ip from dhcp: ", ip) + } + + // lease.FixedAddress.String() could return + if ip == "" { + return "", errors.New("failed to get IP from dhcp for ddns, got ip as ") + } + + // start a go routine to stop dhclient when lose leader election + // also to keep read the ip from channel + // so onbound function is unblocked to send the ip + go func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + log.Info("stop dhclient for ddns") + client.Stop() + return + case ip := <-client.IPChannel(): + log.Info("got ip from dhcp: ", ip) + } + } + }(ddns.ctx) + + return ip, nil +} diff --git a/pkg/vip/dhcp.go b/pkg/vip/dhcp.go new file mode 100644 index 00000000..78703683 --- /dev/null +++ b/pkg/vip/dhcp.go @@ -0,0 +1,288 @@ +package vip + +// DHCP client implementation that refers to https://www.rfc-editor.org/rfc/rfc2131.html + 
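// Illustrative sketch (not part of the patch): one way the DDNS manager added above might
// be driven. It assumes `network` is a vip.Network built for a DNS name with isDDNS=true
// (for example via the reworked NewConfig earlier in this patch); the package and function
// names are invented for illustration.
package vipexample

import (
	"context"

	"github.com/kube-vip/kube-vip/pkg/vip"
)

func startDDNS(ctx context.Context, network vip.Network) error {
	// Start runs a DHCP client that presents network.DDNSHostName() to the DHCP server
	// and blocks (for up to a minute) until a lease is obtained.
	ip, err := vip.NewDDNSManager(ctx, network).Start()
	if err != nil {
		return err
	}
	// Apply the leased address to the interface; the DNS updater refreshes it if the
	// record changes later.
	if err := network.SetIP(ip); err != nil {
		return err
	}
	return network.AddIP()
}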
+import ( + "context" + "fmt" + "net" + "time" + + "github.com/insomniacslk/dhcp/dhcpv4" + "github.com/insomniacslk/dhcp/dhcpv4/nclient4" + "github.com/jpillora/backoff" + log "github.com/sirupsen/logrus" +) + +const dhcpClientPort = "68" +const defaultDHCPRenew = time.Hour +const maxBackoffAttempts = 3 + +// DHCPClient is responsible for maintaining ipv4 lease for one specified interface +type DHCPClient struct { + iface *net.Interface + ddnsHostName string + lease *nclient4.Lease + initRebootFlag bool + requestedIP net.IP + stopChan chan struct{} // used as a signal to release the IP and stop the dhcp client daemon + releasedChan chan struct{} // indicate that the IP has been released + errorChan chan error // indicates there was an error on the IP request + ipChan chan string +} + +// NewDHCPClient returns a new DHCP Client. +func NewDHCPClient(iface *net.Interface, initRebootFlag bool, requestedIP string) *DHCPClient { + return &DHCPClient{ + iface: iface, + stopChan: make(chan struct{}), + releasedChan: make(chan struct{}), + errorChan: make(chan error), + initRebootFlag: initRebootFlag, + requestedIP: net.ParseIP(requestedIP), + ipChan: make(chan string), + } +} + +func (c *DHCPClient) WithHostName(hostname string) *DHCPClient { + c.ddnsHostName = hostname + return c +} + +// Stop state-transition process and close dhcp client +func (c *DHCPClient) Stop() { + close(c.ipChan) + close(c.stopChan) + <-c.releasedChan +} + +// Gets the IPChannel for consumption +func (c *DHCPClient) IPChannel() chan string { + return c.ipChan +} + +// Gets the ErrorChannel for consumption +func (c *DHCPClient) ErrorChannel() chan error { + return c.errorChan +} + +// Start state-transition process of dhcp client +// +// -------- ------- +// +// | | +-------------------------->| |<-------------------+ +// | INIT- | | +-------------------->| INIT | | +// | REBOOT |DHCPNAK/ +---------->| |<---+ | +// | |Restart| | ------- | | +// +// -------- | DHCPNAK/ | | | +// | Discard offer | -/Send DHCPDISCOVER | +// +// -/Send DHCPREQUEST | | | +// +// | | | DHCPACK v | | +// ----------- | (not accept.)/ ----------- | | +// +// | | | Send DHCPDECLINE | | | +// | REBOOTING | | | | SELECTING |<----+ | +// | | | / | | |DHCPOFFER/ | +// +// ----------- | / ----------- | |Collect | +// | | / | | | replies | +// +// DHCPACK/ | / +----------------+ +-------+ | +// Record lease, set| | v Select offer/ | +// timers T1, T2 ------------ send DHCPREQUEST | | +// +// | +----->| | DHCPNAK, Lease expired/ | +// | | | REQUESTING | Halt network | +// DHCPOFFER/ | | | | +// Discard ------------ | | +// | | | | ----------- | +// | +--------+ DHCPACK/ | | | +// | Record lease, set -----| REBINDING | | +// | timers T1, T2 / | | | +// | | DHCPACK/ ----------- | +// | v Record lease, set ^ | +// +----------------> ------- /timers T1,T2 | | +// +----->| |<---+ | | +// | | BOUND |<---+ | | +// DHCPOFFER, DHCPACK, | | | T2 expires/ DHCPNAK/ +// DHCPNAK/Discard ------- | Broadcast Halt network +// | | | | DHCPREQUEST | +// +-------+ | DHCPACK/ | | +// T1 expires/ Record lease, set | | +// Send DHCPREQUEST timers T1, T2 | | +// to leasing server | | | +// | ---------- | | +// | | |------------+ | +// +->| RENEWING | | +// | |----------------------------+ +// ---------- +// Figure: State-transition diagram for DHCP clients +func (c *DHCPClient) Start() { + lease := c.requestWithBackoff() + + c.initRebootFlag = false + c.lease = lease + + // Set up two ticker to renew/rebind regularly + t1Timeout := c.lease.ACK.IPAddressLeaseTime(defaultDHCPRenew) 
/ 2 + t2Timeout := (c.lease.ACK.IPAddressLeaseTime(defaultDHCPRenew) / 8) * 7 + log.Debugf("t1 %v t2 %v", t1Timeout, t2Timeout) + t1, t2 := time.NewTicker(t1Timeout), time.NewTicker(t2Timeout) + + for { + select { + case <-t1.C: + // renew is a unicast request of the IP renewal + // A point on renew is: the library does not return the right message (NAK) + // on renew error due to IP Change, but instead it returns a different error + // This way there's not much to do other than log and continue, as the renew error + // may be an offline server, or may be an incorrect package match + lease, err := c.renew() + if err == nil { + c.lease = lease + log.Infof("renew, lease: %+v", lease) + t2.Reset(t2Timeout) + } else { + log.Errorf("renew failed, error: %s", err.Error()) + } + case <-t2.C: + // rebind is just like a request, but forcing to provide a new IP address + lease, err := c.request(true) + if err == nil { + c.lease = lease + log.Infof("rebind, lease: %+v", lease) + } else { + if _, ok := err.(*nclient4.ErrNak); !ok { + t1.Stop() + t2.Stop() + log.Errorf("rebind failed, error: %s", err.Error()) + return + } + log.Warnf("ip %s may have changed: %s", c.lease.ACK.YourIPAddr, err.Error()) + c.initRebootFlag = false + c.lease = c.requestWithBackoff() + } + t1.Reset(t1Timeout) + t2.Reset(t2Timeout) + + case <-c.stopChan: + // release is a unicast request of the IP release. + if err := c.release(); err != nil { + log.Errorf("release lease failed, error: %s, lease: %+v", err.Error(), c.lease) + } else { + log.Infof("release, lease: %+v", c.lease) + } + t1.Stop() + t2.Stop() + + close(c.releasedChan) + return + } + } +} + +// -------------------------------------------------------- +// | |INIT-REBOOT | RENEWING |REBINDING | +// -------------------------------------------------------- +// |broad/unicast |broadcast | unicast |broadcast | +// |server-ip |MUST NOT | MUST NOT |MUST NOT | +// |requested-ip |MUST | MUST NOT |MUST NOT | +// |ciaddr |zero | IP address |IP address| +// -------------------------------------------------------- + +func (c *DHCPClient) requestWithBackoff() *nclient4.Lease { + backoff := backoff.Backoff{ + Factor: 2, + Jitter: true, + Min: 10 * time.Second, + Max: 1 * time.Minute, + } + + var lease *nclient4.Lease + var err error + + for { + log.Debugf("trying to get a new IP, attempt %f", backoff.Attempt()) + lease, err = c.request(false) + if err != nil { + dur := backoff.Duration() + if backoff.Attempt() > maxBackoffAttempts-1 { + errMsg := fmt.Errorf("failed to get an IP address after %d attempts, error %s, giving up", maxBackoffAttempts, err.Error()) + log.Error(errMsg) + c.errorChan <- errMsg + c.Stop() + return nil + } + log.Errorf("request failed, error: %s (waiting %v)", err.Error(), dur) + time.Sleep(dur) + continue + } + backoff.Reset() + break + } + + if c.ipChan != nil { + log.Debugf("using channel") + c.ipChan <- lease.ACK.YourIPAddr.String() + } + + return lease +} + +func (c *DHCPClient) request(rebind bool) (*nclient4.Lease, error) { + dhclient, err := nclient4.New(c.iface.Name) + if err != nil { + return nil, fmt.Errorf("create a client for iface %s failed, error: %w", c.iface.Name, err) + } + + defer dhclient.Close() + + modifiers := make([]dhcpv4.Modifier, 0) + + if c.ddnsHostName != "" { + modifiers = append(modifiers, + dhcpv4.WithOption(dhcpv4.OptHostName(c.ddnsHostName)), + dhcpv4.WithOption(dhcpv4.OptClientIdentifier([]byte(c.ddnsHostName))), + ) + } + + // if initRebootFlag is set, this means we have an IP already set on c.requestedIP that should be 
used + if c.initRebootFlag { + log.Debugf("init-reboot ip %s", c.requestedIP) + modifiers = append(modifiers, dhcpv4.WithOption(dhcpv4.OptRequestedIPAddress(c.requestedIP))) + } + + // if this is a rebind, then the IP we should set is the one that already exists in lease + if rebind { + log.Debugf("rebinding ip %s", c.lease.ACK.YourIPAddr) + modifiers = append(modifiers, dhcpv4.WithOption(dhcpv4.OptRequestedIPAddress(c.lease.ACK.YourIPAddr))) + } + + return dhclient.Request(context.TODO(), modifiers...) +} + +func (c *DHCPClient) release() error { + dhclient, err := nclient4.New(c.iface.Name) + if err != nil { + return fmt.Errorf("create release client failed, error: %w, iface: %s, server ip: %v", err, c.iface.Name, c.lease.ACK.ServerIPAddr) + } + defer dhclient.Close() + + // TODO modify lease + return dhclient.Release(c.lease) +} + +func (c *DHCPClient) renew() (*nclient4.Lease, error) { + // renew needs a unicast client. This is due to some servers (like dnsmasq) require the exact request coming from the vip interface + dhclient, err := nclient4.New(c.iface.Name, + nclient4.WithUnicast(&net.UDPAddr{IP: c.lease.ACK.YourIPAddr, Port: nclient4.ClientPort})) + if err != nil { + return nil, fmt.Errorf("create renew client failed, error: %w, server ip: %v", err, c.lease.ACK.ServerIPAddr) + } + defer dhclient.Close() + + return dhclient.Renew(context.TODO(), c.lease) +} diff --git a/pkg/vip/dns.go b/pkg/vip/dns.go index 17f1f419..fbaaa121 100644 --- a/pkg/vip/dns.go +++ b/pkg/vip/dns.go @@ -13,15 +13,13 @@ type IPUpdater interface { } type ipUpdater struct { - dnsName string - vip Network + vip Network } // NewIPUpdater creates a DNSUpdater -func NewIPUpdater(dnsName string, vip Network) IPUpdater { +func NewIPUpdater(vip Network) IPUpdater { return &ipUpdater{ - dnsName: dnsName, - vip: vip, + vip: vip, } } @@ -31,18 +29,29 @@ func (d *ipUpdater) Run(ctx context.Context) { for { select { case <-ctx.Done(): + log.Infof("stop ipUpdater") return default: - ip, err := lookupHost(d.dnsName) + mode := "ipv4" + if IsIPv6(d.vip.IP()) { + mode = "ipv6" + } + + ip, err := LookupHost(d.vip.DNSName(), mode) if err != nil { - log.Warnf("cannot lookup %s: %v", d.dnsName, err) + log.Warnf("cannot lookup %s: %v", d.vip.DNSName(), err) // fallback to renewing the existing IP - ip = d.vip.IP() + ip = []string{d.vip.IP()} } log.Infof("setting %s as an IP", ip) - d.vip.SetIP(ip) - d.vip.AddIP() + if err := d.vip.SetIP(ip[0]); err != nil { + log.Errorf("setting %s as an IP: %v", ip, err) + } + + if err := d.vip.AddIP(); err != nil { + log.Errorf("error adding virtual IP: %v", err) + } } time.Sleep(3 * time.Second) diff --git a/pkg/vip/egress.go b/pkg/vip/egress.go new file mode 100644 index 00000000..9ce34817 --- /dev/null +++ b/pkg/vip/egress.go @@ -0,0 +1,280 @@ +package vip + +import ( + "fmt" + "strings" + + iptables "github.com/kube-vip/kube-vip/pkg/iptables" + log "github.com/sirupsen/logrus" + + ct "github.com/florianl/go-conntrack" +) + +//Notes: https://github.com/cloudnativelabs/kube-router/issues/434 + +// This file contains all of the functions related to changing SNAT for a +// pod so that it appears to be coming from a VIP. + +// 1. Create a new chain in the mangle table +// 2. Ignore (or RETURN) packets going to a service or other pod address +// 3. Mark packets coming from a pod +// 4. Add a rule in the mangle chain PREROUTING to jump to the new chain created above +// 5. Mark packets going through this host (not originating) (might not be needed) +// 6. 
Perform source nating on marked packets + +// Create new iptables client +// Test to find out what exists before hand + +const MangleChainName = "KUBE-VIP-EGRESS" +const Comment = "a3ViZS12aXAK=kube-vip" + +type Egress struct { + ipTablesClient *iptables.IPTables + comment string +} + +func CreateIptablesClient(nftables bool, namespace string, protocol iptables.Protocol) (*Egress, error) { + log.Infof("[egress] Creating an iptables client, nftables mode [%t]", nftables) + e := new(Egress) + var err error + + options := []iptables.Option{} + options = append(options, iptables.EnableNFTables(nftables)) + + if protocol == iptables.ProtocolIPv6 { + options = append(options, iptables.IPFamily(iptables.ProtocolIPv6), iptables.Timeout(5)) + } + + e.ipTablesClient, err = iptables.New(options...) + e.comment = Comment + "-" + namespace + return e, err +} + +func (e *Egress) CheckMangleChain(name string) (bool, error) { + log.Infof("[egress] Checking for Chain [%s]", name) + return e.ipTablesClient.ChainExists("mangle", name) +} + +func (e *Egress) DeleteMangleChain(name string) error { + return e.ipTablesClient.ClearAndDeleteChain("mangle", name) +} + +func (e *Egress) DeleteManglePrerouting(name string) error { + return e.ipTablesClient.Delete("mangle", "PREROUTING", "-j", name) +} + +func (e *Egress) DeleteMangleMarking(podIP, name string) error { + log.Infof("[egress] Stopping marking packets on network [%s]", podIP) + + exists, _ := e.ipTablesClient.Exists("mangle", name, "-s", podIP, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Mangle rule for [%s]", podIP) + } + return e.ipTablesClient.Delete("mangle", name, "-s", podIP, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) DeleteSourceNat(podIP, vip string) error { + log.Infof("[egress] Removing source nat from [%s] => [%s]", podIP, vip) + + exists, _ := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Nat rule for [%s]", podIP) + } + return e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) DeleteSourceNatForDestinationPort(podIP, vip, port, proto string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s]", podIP, vip) + + exists, _ := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) + + if !exists { + return fmt.Errorf("unable to find source Nat rule for [%s], with destination port [%s]", podIP, port) + } + return e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) CreateMangleChain(name string) error { + + log.Infof("[egress] Creating Chain [%s]", name) + // Creates a new chain in the mangle table + return e.ipTablesClient.NewChain("mangle", name) + +} +func (e *Egress) AppendReturnRulesForDestinationSubnet(name, subnet string) error { + log.Infof("[egress] Adding jump for subnet [%s] to RETURN to previous chain/rules", subnet) + exists, _ := 
e.ipTablesClient.Exists("mangle", name, "-d", subnet, "-j", "RETURN", "-m", "comment", "--comment", e.comment) + if !exists { + return e.ipTablesClient.Append("mangle", name, "-d", subnet, "-j", "RETURN", "-m", "comment", "--comment", e.comment) + } + return nil +} + +func (e *Egress) AppendReturnRulesForMarking(name, subnet string) error { + log.Infof("[egress] Marking packets on network [%s]", subnet) + exists, _ := e.ipTablesClient.Exists("mangle", name, "-s", subnet, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + if !exists { + return e.ipTablesClient.Append("mangle", name, "-s", subnet, "-j", "MARK", "--set-mark", "64/64", "-m", "comment", "--comment", e.comment) + } + return nil +} + +func (e *Egress) InsertMangeTableIntoPrerouting(name string) error { + log.Infof("[egress] Adding jump from mangle prerouting to [%s]", name) + if exists, err := e.ipTablesClient.Exists("mangle", "PREROUTING", "-j", name, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("mangle", "PREROUTING", "-j", name, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("mangle", "PREROUTING", 1, "-j", name, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) InsertSourceNat(vip, podIP string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s]", podIP, vip) + if exists, err := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("nat", "POSTROUTING", 1, "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-m", "comment", "--comment", e.comment) +} + +func (e *Egress) InsertSourceNatForDestinationPort(vip, podIP, port, proto string) error { + log.Infof("[egress] Adding source nat from [%s] => [%s], with destination port [%s]", podIP, vip, port) + if exists, err := e.ipTablesClient.Exists("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment); err != nil { + return err + } else if exists { + if err2 := e.ipTablesClient.Delete("nat", "POSTROUTING", "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment); err2 != nil { + return err2 + } + } + + return e.ipTablesClient.Insert("nat", "POSTROUTING", 1, "-s", podIP+"/32", "-m", "mark", "--mark", "64/64", "-j", "SNAT", "--to-source", vip, "-p", proto, "--dport", port, "-m", "comment", "--comment", e.comment) +} + +func DeleteExistingSessions(sessionIP string, destination bool) error { + + nfct, err := ct.Open(&ct.Config{}) + if err != nil { + log.Errorf("could not create nfct: %v", err) + return err + } + defer nfct.Close() + sessions, err := nfct.Dump(ct.Conntrack, ct.IPv4) + if err != nil { + log.Errorf("could not dump sessions: %v", err) + return err + } + // by default we only clear source (i.e. 
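To make the intended call order of the helpers above concrete, here is a hedged sketch that walks the numbered steps from the top of this file. The namespace, CIDRs, pod IP and VIP are placeholders, error handling is elided for brevity, and `iptables.ProtocolIPv4` is assumed to exist alongside the `ProtocolIPv6` constant used above; it is not a definitive wiring of kube-vip's egress controller.

```go
package main

import (
	"log"

	iptables "github.com/kube-vip/kube-vip/pkg/iptables"
	"github.com/kube-vip/kube-vip/pkg/vip"
)

func main() {
	// Placeholders for illustration only.
	const (
		podIP       = "10.244.1.5"
		vipAddress  = "192.168.0.220"
		serviceCIDR = "10.96.0.0/12"
		podCIDR     = "10.244.0.0/16"
	)

	e, err := vip.CreateIptablesClient(false, "default", iptables.ProtocolIPv4)
	if err != nil {
		log.Fatal(err)
	}

	// 1. Create (or reuse) the mangle chain.
	if exists, _ := e.CheckMangleChain(vip.MangleChainName); !exists {
		if err := e.CreateMangleChain(vip.MangleChainName); err != nil {
			log.Fatal(err)
		}
	}

	// 2. RETURN early for traffic that stays inside the cluster.
	_ = e.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, serviceCIDR)
	_ = e.AppendReturnRulesForDestinationSubnet(vip.MangleChainName, podCIDR)

	// 3. Mark everything else leaving the pod.
	_ = e.AppendReturnRulesForMarking(vip.MangleChainName, podIP+"/32")

	// 4. Jump into the chain from mangle PREROUTING.
	_ = e.InsertMangeTableIntoPrerouting(vip.MangleChainName)

	// 5. Marking forwarded traffic is noted above as possibly unnecessary, so it is skipped here.

	// 6. SNAT the marked packets so they leave with the VIP as their source address.
	_ = e.InsertSourceNat(vipAddress, podIP)
}
```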
connections going from the vip (egress)) + if !destination { + for _, session := range sessions { + //fmt.Printf("Looking for [%s] found [%s]\n", podIP, session.Origin.Dst.String()) + + if session.Origin.Src.String() == sessionIP /*&& *session.Origin.Proto.DstPort == uint16(destinationPort)*/ { + //fmt.Printf("Source -> %s Destination -> %s:%d\n", session.Origin.Src.String(), session.Origin.Dst.String(), *session.Origin.Proto.DstPort) + err = nfct.Delete(ct.Conntrack, ct.IPv4, session) + if err != nil { + log.Errorf("could not delete sessions: %v", err) + } + } + } + } else { + // This will clear any "dangling" outbound connections. + for _, session := range sessions { + //fmt.Printf("Looking for [%s] found [%s]\n", podIP, session.Origin.Dst.String()) + + if session.Origin.Dst.String() == sessionIP /*&& *session.Origin.Proto.DstPort == uint16(destinationPort)*/ { + //fmt.Printf("Source -> %s Destination -> %s:%d\n", session.Origin.Src.String(), session.Origin.Dst.String(), *session.Origin.Proto.DstPort) + err = nfct.Delete(ct.Conntrack, ct.IPv4, session) + if err != nil { + log.Errorf("could not delete sessions: %v", err) + } + } + } + } + + return nil +} + +// Debug functions + +func (e *Egress) DumpChain(name string) error { + log.Infof("Dumping chain [%s]", name) + c, err := e.ipTablesClient.List("mangle", name) + if err != nil { + return err + } + for x := range c { + log.Infof("Rule -> %s", c[x]) + } + return nil +} + +func (e *Egress) CleanIPtables() error { + natRules, err := e.ipTablesClient.List("nat", "POSTROUTING") + if err != nil { + return err + } + foundNatRules := e.findRules(natRules) + log.Warnf("[egress] Cleaning [%d] dangling postrouting nat rules", len(foundNatRules)) + for x := range foundNatRules { + err = e.ipTablesClient.Delete("nat", "POSTROUTING", foundNatRules[x][2:]...) + if err != nil { + log.Errorf("[egress] Error removing rule [%v]", err) + } + } + exists, err := e.CheckMangleChain(MangleChainName) + if err != nil { + log.Debugf("[egress] No Mangle chain exists [%v]", err) + } + if exists { + mangleRules, err := e.ipTablesClient.List("mangle", MangleChainName) + if err != nil { + return err + } + foundNatRules = e.findRules(mangleRules) + log.Warnf("[egress] Cleaning [%d] dangling prerouting mangle rules", len(foundNatRules)) + for x := range foundNatRules { + err = e.ipTablesClient.Delete("mangle", MangleChainName, foundNatRules[x][2:]...) 
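+		// Each entry in foundNatRules is the rule split into fields, e.g.
+		// ["-A", "KUBE-VIP-EGRESS", "-s", "10.244.1.5/32", ...] (IP shown for
+		// illustration); slicing off the first two fields ("-A <chain>") leaves
+		// the rule spec that Delete expects alongside the table and chain name.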
+ if err != nil { + log.Errorf("[egress] Error removing rule [%v]", err) + } + } + + // For unknown reasons RHEL and the nftables wrapper sometimes leave dangling rules + // So we shall nuke them from orbit (just to be sure) + + // err = e.ipTablesClient.ClearChain("mangle", MangleChainName) + // if err != nil { + // log.Errorf("[egress] Error removing flushing table [%v]", err) + // } + } else { + log.Warnf("No existing mangle chain [%s] exists", MangleChainName) + } + return nil +} + +func (e *Egress) findRules(rules []string) [][]string { + var foundRules [][]string + + for i := range rules { + r := strings.Split(rules[i], " ") + for x := range r { + if r[x] == "\""+e.comment+"\"" { + // Remove the quotes around the comment + r[x] = strings.Trim(r[x], "\"") + foundRules = append(foundRules, r) + } + } + } + + return foundRules +} diff --git a/pkg/vip/egress_test.go b/pkg/vip/egress_test.go new file mode 100644 index 00000000..8466e437 --- /dev/null +++ b/pkg/vip/egress_test.go @@ -0,0 +1,36 @@ +package vip + +import ( + "fmt" + "reflect" + "testing" +) + +func Test_findRules(t *testing.T) { + e := Egress{comment: Comment + "-" + "default"} + type args struct { + rules []string + } + tests := []struct { + name string + args args + want [][]string + }{ + { + "test", + args{[]string{ + "-A PREROUTING -m comment --comment \"cali:6gwbT8clXdHdC1b1\" -j cali-PREROUTING", + fmt.Sprintf("-A KUBE-VIP-EGRESS -s 172.17.88.190/32 -m comment --comment \"%s\" -j MARK --set-xmark 0x40/0x40", e.comment), + fmt.Sprintf("-A POSTROUTING -m comment --comment \"%s\" -j RETURN", e.comment), + }}, + [][]string{{"-A", "KUBE-VIP-EGRESS", "-s", "172.17.88.190/32", "-m", "comment", "--comment", e.comment, "-j", "MARK", "--set-xmark", "0x40/0x40"}, {"-A", "POSTROUTING", "-m", "comment", "--comment", e.comment, "-j", "RETURN"}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := e.findRules(tt.args.rules); !reflect.DeepEqual(got, tt.want) { + t.Errorf("findRules() = \n%v, want \n%v", got, tt.want) + } + }) + } +} diff --git a/pkg/vip/ndp.go b/pkg/vip/ndp.go new file mode 100644 index 00000000..d7ebec35 --- /dev/null +++ b/pkg/vip/ndp.go @@ -0,0 +1,71 @@ +package vip + +import ( + "fmt" + "net" + "net/netip" + + "github.com/mdlayher/ndp" + + log "github.com/sirupsen/logrus" +) + +// NdpResponder defines the parameters for the NDP connection. +type NdpResponder struct { + intf string + hardwareAddr net.HardwareAddr + conn *ndp.Conn +} + +// NewNDPResponder takes an ifaceName and returns a new NDP responder and error if encountered. +func NewNDPResponder(ifaceName string) (*NdpResponder, error) { + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return nil, fmt.Errorf("failed to get interface %q: %v", ifaceName, err) + } + + // Use link-local address as the source IPv6 address for NDP communications. + conn, _, err := ndp.Listen(iface, ndp.LinkLocal) + if err != nil { + return nil, fmt.Errorf("creating NDP responder for %q: %s", iface.Name, err) + } + + ret := &NdpResponder{ + intf: iface.Name, + hardwareAddr: iface.HardwareAddr, + conn: conn, + } + return ret, nil +} + +// Close closes the NDP responder connection. +func (n *NdpResponder) Close() error { + return n.conn.Close() +} + +// SendGratuitous broadcasts an NDP update or returns error if encountered. 
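The SendGratuitous method that follows is typically called right after a failover so that neighbours update their caches. As a minimal usage sketch (the interface name and IPv6 address are placeholders, and the import path assumes `pkg/vip` as with the rest of this package):

```go
package main

import (
	"log"

	"github.com/kube-vip/kube-vip/pkg/vip"
)

func main() {
	// Placeholder interface; use the interface that will carry the IPv6 VIP.
	ndp, err := vip.NewNDPResponder("eth0")
	if err != nil {
		log.Fatal(err)
	}
	defer ndp.Close()

	// Unsolicited neighbor advertisement so peers refresh their neighbour caches.
	if err := ndp.SendGratuitous("fd00::10"); err != nil {
		log.Printf("failed to advertise VIP: %v", err)
	}
}
```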
+func (n *NdpResponder) SendGratuitous(address string) error { + ip, err := netip.ParseAddr(address) + if err != nil { + return fmt.Errorf("failed to parse address %s", ip) + } + + log.Infof("Broadcasting NDP update for %s (%s) via %s", address, n.hardwareAddr, n.intf) + return n.advertise(netip.IPv6LinkLocalAllNodes(), ip, true) +} + +func (n *NdpResponder) advertise(dst, target netip.Addr, gratuitous bool) error { + m := &ndp.NeighborAdvertisement{ + Solicited: !gratuitous, + Override: gratuitous, // Should clients replace existing cache entries + TargetAddress: target, + Options: []ndp.Option{ + &ndp.LinkLayerAddress{ + Direction: ndp.Target, + Addr: n.hardwareAddr, + }, + }, + } + log.Infof("ndp: %v", m) + return n.conn.WriteTo(m, nil, dst) +} diff --git a/pkg/vip/util.go b/pkg/vip/util.go index bf6401f5..77b0b3cb 100644 --- a/pkg/vip/util.go +++ b/pkg/vip/util.go @@ -1,24 +1,189 @@ package vip import ( + "context" + "crypto/rand" + "fmt" "net" + "strings" + "syscall" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + "github.com/vishvananda/netlink" ) // LookupHost resolves dnsName and return an IP or an error -func lookupHost(dnsName string) (string, error) { - addrs, err := net.LookupHost(dnsName) +func LookupHost(dnsName, dnsMode string) ([]string, error) { + result, err := net.LookupHost(dnsName) if err != nil { - return "", err + return nil, err + } + if len(result) == 0 { + return nil, errors.Errorf("empty address for %s", dnsName) + } + addrs := []string{} + switch dnsMode { + case "ipv4", "ipv6", "dual": + a, err := getIPbyFamily(result, dnsMode) + if err != nil { + return nil, err + } + addrs = append(addrs, a...) + default: + addrs = append(addrs, result[0]) + } + + return addrs, nil +} + +func getIPbyFamily(addresses []string, family string) ([]string, error) { + var checkers []func(string) bool + families := []string{} + if family == "dual" || family == "ipv4" { + checkers = append(checkers, IsIPv4) + families = append(families, "IPv4") } - if len(addrs) == 0 { - return "", errors.Errorf("empty address for %s", dnsName) + if family == "dual" || family == "ipv6" { + checkers = append(checkers, IsIPv6) + families = append(families, "IPv6") } - return addrs[0], nil + + addrs := []string{} + for i, c := range checkers { + addr, err := getIPbyChecker(addresses, c) + if err != nil { + return nil, fmt.Errorf("error getting %s address: %w", families[i], err) + } + addrs = append(addrs, addr) + } + + return addrs, nil +} + +func getIPbyChecker(addresses []string, checker func(string) bool) (string, error) { + for _, addr := range addresses { + if checker(addr) { + return addr, nil + } + } + return "", fmt.Errorf("address not found") } // IsIP returns if address is an IP or not func IsIP(address string) bool { ip := net.ParseIP(address) return ip != nil -} \ No newline at end of file +} + +// getHostName return the hostname from the fqdn +func getHostName(dnsName string) string { + if dnsName == "" { + return "" + } + + fields := strings.Split(dnsName, ".") + return fields[0] +} + +// IsIPv4 returns true only if address is a valid IPv4 address +func IsIPv4(address string) bool { + ip := net.ParseIP(address) + if ip == nil { + return false + } + return ip.To4() != nil +} + +// IsIPv6 returns true only if address is a valid IPv6 address +func IsIPv6(address string) bool { + ip := net.ParseIP(address) + if ip == nil { + return false + } + return ip.To4() == nil +} + +// GetFullMask returns /32 for an IPv4 address and /128 for an IPv6 address +func GetFullMask(address 
string) (string, error) {
+	if IsIPv4(address) {
+		return "/32", nil
+	}
+	if IsIPv6(address) {
+		return "/128", nil
+	}
+	return "", fmt.Errorf("failed to parse %s as either IPv4 or IPv6", address)
+}
+
+// GetDefaultGatewayInterface returns the default gateway interface link
+func GetDefaultGatewayInterface() (*net.Interface, error) {
+	routes, err := netlink.RouteList(nil, syscall.AF_INET)
+	if err != nil {
+		return nil, err
+	}
+
+	routes6, err := netlink.RouteList(nil, syscall.AF_INET6)
+	if err != nil {
+		return nil, err
+	}
+
+	routes = append(routes, routes6...)
+
+	for _, route := range routes {
+		if route.Dst == nil || route.Dst.String() == "0.0.0.0/0" || route.Dst.String() == "::/0" {
+			if route.LinkIndex <= 0 {
+				return nil, errors.New("Found default route but could not determine interface")
+			}
+			return net.InterfaceByIndex(route.LinkIndex)
+		}
+	}
+
+	return nil, errors.New("Unable to find default route")
+}
+
+// MonitorDefaultInterface monitors the default interface and catches deletion of the default route
+func MonitorDefaultInterface(ctx context.Context, defaultIF *net.Interface) error {
+	routeCh := make(chan netlink.RouteUpdate)
+	if err := netlink.RouteSubscribe(routeCh, ctx.Done()); err != nil {
+		return fmt.Errorf("subscribe route failed, error: %w", err)
+	}
+
+	for {
+		select {
+		case r := <-routeCh:
+			log.Debugf("type: %d, route: %+v", r.Type, r.Route)
+			if r.Type == syscall.RTM_DELROUTE && (r.Dst == nil || r.Dst.String() == "0.0.0.0/0") && r.LinkIndex == defaultIF.Index {
+				return fmt.Errorf("default route deleted and the default interface may be invalid")
+			}
+		case <-ctx.Done():
+			return nil
+		}
+	}
+}
+
+func GenerateMac() (mac string) {
+	buf := make([]byte, 3)
+	_, err := rand.Read(buf)
+	if err != nil {
+		return
+	}
+
+	/**
+	 * The first 3 bytes need to match a real manufacturer
+	 * you can refer to the following lists for examples:
+	 * - https://gist.github.com/aallan/b4bb86db86079509e6159810ae9bd3e4
+	 * - https://macaddress.io/database-download
+	 */
+	mac = fmt.Sprintf("%s:%s:%s:%02x:%02x:%02x", "00", "00", "6C", buf[0], buf[1], buf[2])
+	log.Infof("Generated mac: %s", mac)
+	return mac
+}
+
+func GetIPs(vip string) []string {
+	addresses := []string{}
+	vips := strings.Split(vip, ",")
+	for _, v := range vips {
+		addresses = append(addresses, strings.TrimSpace(v))
+	}
+	return addresses
+}
diff --git a/pkg/wireguard/architecture.md b/pkg/wireguard/architecture.md
new file mode 100644
index 00000000..47de96f9
--- /dev/null
+++ b/pkg/wireguard/architecture.md
@@ -0,0 +1,29 @@
+# Wireguard Architecture
+
+This brief document is largely for my own notes about how this functionality is added to `kube-vip`.
+
+## Overview
+
+- New Flags
+- Startup
+- Secret(s)
+
+### New Flags
+
+A `--wireguard` flag or `vip_wireguard` environment variable will determine whether Wireguard mode is enabled; if it is, kube-vip will start the Wireguard manager process.
+
+### Startup
+
+This will require `kube-vip` to start as a DaemonSet, as it will need to read existing data (secrets) from inside the cluster.
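As a rough illustration of the gating described above, the sketch below reads the `vip_wireguard` environment variable and, when enabled, hands the secret material to the `ConfigureInterface` helper added later in this PR (`pkg/wireguard`). The `== "true"` check and the `WG_*` environment variable names are assumptions for the sketch; in the cluster the values would come from the `wireguard` secret described in the next section.

```go
package main

import (
	"log"
	"os"

	"github.com/kube-vip/kube-vip/pkg/wireguard"
)

func main() {
	// Assumption: the env var is treated as a boolean-style toggle.
	if os.Getenv("vip_wireguard") != "true" {
		return // Wireguard mode disabled, nothing to do
	}

	// Hypothetical plumbing: in-cluster these values would be mounted from the
	// "wireguard" secret (privateKey, peerPublicKey, peerEndpoint).
	err := wireguard.ConfigureInterface(
		os.Getenv("WG_PRIVATE_KEY"),
		os.Getenv("WG_PEER_PUBLIC_KEY"),
		os.Getenv("WG_PEER_ENDPOINT"),
	)
	if err != nil {
		log.Fatalf("failed to configure wg0: %v", err)
	}
}
```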
+ +### Secrets + +Create a private key for the cluster: + +``` +PRIKEY=$(wg genkey) +PUBKEY=$(echo $PRIKEY | wg pubkey) +PEERKEY=$(sudo wg show wg0 public-key) +echo "kubectl create -n kube-system secret generic wireguard --from-literal=privateKey=$PRIKEY --from-literal=peerPublicKey=$PEERKEY --from-literal=peerEndpoint=192.168.0.179" +sudo wg set wg0 peer $PUBKEY allowed-ips 10.0.0.0/8 +``` diff --git a/pkg/wireguard/wireguard.go b/pkg/wireguard/wireguard.go new file mode 100644 index 00000000..0eac845b --- /dev/null +++ b/pkg/wireguard/wireguard.go @@ -0,0 +1,64 @@ +package wireguard + +import ( + "fmt" + "net" + "os" + "time" + + "golang.zx2c4.com/wireguard/wgctrl" + "golang.zx2c4.com/wireguard/wgctrl/wgtypes" +) + +func ConfigureInterface(priKey, peerPublicKey, endpoint string) error { + + client, err := wgctrl.New() + if err != nil { + return fmt.Errorf("failed to open client: %v", err) + } + defer client.Close() + + pri, err := wgtypes.ParseKey(priKey) + if err != nil { + return fmt.Errorf("failed to generate private key: %v", err) + } + + pub, err := wgtypes.ParseKey(peerPublicKey) // Should be generated by the remote peer + if err != nil { + return fmt.Errorf("failed to parse public key: %v", err) + } + + //log.Printf("Public Key [%s]", pri.PublicKey()) + + port := 51820 + ka := 20 * time.Second + + conf := wgtypes.Config{ + PrivateKey: &pri, + ListenPort: &port, + ReplacePeers: true, + Peers: []wgtypes.PeerConfig{{ + PublicKey: pub, + Remove: false, + UpdateOnly: false, + Endpoint: &net.UDPAddr{ + IP: net.ParseIP(endpoint), + Port: 51820, + }, + PersistentKeepaliveInterval: &ka, + ReplaceAllowedIPs: true, + AllowedIPs: []net.IPNet{{ + IP: net.ParseIP("10.0.0.0"), + Mask: net.ParseIP("0.0.0.0").DefaultMask(), + }}, + }}, + } + + if err := client.ConfigureDevice("wg0", conf); err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("wg0 doesn't exist [%s]", err) + } + return fmt.Errorf("unknown config error: %v", err) + } + return nil +} diff --git a/testing/e2e/README.md b/testing/e2e/README.md new file mode 100644 index 00000000..ac739652 --- /dev/null +++ b/testing/e2e/README.md @@ -0,0 +1,26 @@ +# Running End To End Tests +Prerequisites: +* Tests must be run on a Linux OS +* Docker installed with IPv6 enabled [how to enable IPv6](https://docs.docker.com/config/daemon/ipv6/) + * You will need to restart your Docker engine after updating the config +* Target kube-vip Docker image exists locally. Either build the image locally + with `make dockerx86Local` or `docker pull` the image from a registry. + +Run the tests from the repo root: +``` +make e2e-tests +``` + +Note: To preserve the test cluster after a test run, run the following: +``` +make E2E_PRESERVE_CLUSTER=true e2e-tests +``` + +The E2E tests: +* Start a local kind cluster +* Load the local docker image into kind +* Test connectivity to the control plane using the VIP +* Kills the current leader + * This causes leader election to occur +* Attempts to connect to the control plane using the VIP + * The new leader will need send ndp advertisements before this can succeed within a timeout diff --git a/testing/e2e/e2e/Dockerfile b/testing/e2e/e2e/Dockerfile new file mode 100644 index 00000000..74db1526 --- /dev/null +++ b/testing/e2e/e2e/Dockerfile @@ -0,0 +1,16 @@ +# syntax=docker/dockerfile:experimental + +FROM golang:1.20-alpine as dev +RUN apk add --no-cache git ca-certificates +RUN adduser -D appuser +COPY . 
/src/ +WORKDIR /src + +ENV GO111MODULE=on +RUN --mount=type=cache,sharing=locked,id=gomod,target=/go/pkg/mod/cache \ + --mount=type=cache,sharing=locked,id=goroot,target=/root/.cache/go-build \ + CGO_ENABLED=0 GOOS=linux go build -ldflags '-s -w -extldflags -static' -o e2eClient /src/main.go + +FROM scratch +COPY --from=dev /src/e2eClient / +CMD ["/e2eClient"] diff --git a/testing/e2e/e2e/Makefile b/testing/e2e/e2e/Makefile new file mode 100644 index 00000000..2ea6e738 --- /dev/null +++ b/testing/e2e/e2e/Makefile @@ -0,0 +1,61 @@ + +SHELL := /bin/bash + +# The name of the executable (default is current directory name) +TARGET := e2e +.DEFAULT_GOAL: $(TARGET) + +# These will be provided to the target +VERSION := 0.0.1 +BUILD := `git rev-parse HEAD` + +# Operating System Default (LINUX) +TARGETOS=linux + +# Use linker flags to provide version/build settings to the target +LDFLAGS=-ldflags "-X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -s" + +# go source files, ignore vendor directory +SRC = $(shell find . -type f -name '*.go' -not -path "./vendor/*") + +DOCKERTAG ?= $(VERSION) +REPOSITORY = plndr + +.PHONY: all build clean install uninstall fmt simplify check run + +all: check install + +$(TARGET): $(SRC) + @go build $(LDFLAGS) -o $(TARGET) + +build: $(TARGET) + @true + +clean: + @rm -f $(TARGET) + +install: + @echo Building and Installing project + @go install $(LDFLAGS) + +uninstall: clean + @rm -f $$(which ${TARGET}) + +fmt: + @gofmt -l -w $(SRC) + +docker: + # @docker buildx build --platform linux/amd64,linux/arm64,linux/arm/v7,linux/ppc64le,linux/s390x --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @docker buildx build --platform linux/amd64 --push -t $(REPOSITORY)/$(TARGET):$(DOCKERTAG) . + @echo New Multi Architecture Docker image created + +simplify: + @gofmt -s -l -w $(SRC) + +check: + @test -z $(shell gofmt -l main.go | tee /dev/stderr) || echo "[WARN] Fix formatting issues with 'make fmt'" + @for d in $$(go list ./... 
| grep -v /vendor/); do golint $${d}; done + @go tool vet ${SRC} + +run: install + @$(TARGET) diff --git a/testing/e2e/e2e/go.mod b/testing/e2e/e2e/go.mod new file mode 100644 index 00000000..d25994bc --- /dev/null +++ b/testing/e2e/e2e/go.mod @@ -0,0 +1,7 @@ +module github.com/kube-vip/kube-vip/testing/e2e/servicesClient + +go 1.19 + +require github.com/sirupsen/logrus v1.9.0 + +require golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect diff --git a/testing/e2e/e2e/go.sum b/testing/e2e/e2e/go.sum new file mode 100644 index 00000000..ed655373 --- /dev/null +++ b/testing/e2e/e2e/go.sum @@ -0,0 +1,15 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/testing/e2e/e2e/main.go b/testing/e2e/e2e/main.go new file mode 100644 index 00000000..61a620ba --- /dev/null +++ b/testing/e2e/e2e/main.go @@ -0,0 +1,66 @@ +package main + +// This is largely to test outbound (egress) connections +import ( + "fmt" + "net" + "net/http" + "os" + "strings" + "time" + + log "github.com/sirupsen/logrus" +) + +func main() { + // Lookup environment variables + mode, exists := os.LookupEnv("E2EMODE") + if !exists { + log.Fatal("The environment variable E2ESERVER, was not set") + } + + switch mode { + case strings.ToUpper("SERVER"): + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, "Hello!") + }) + + log.Info("Starting server at port 80") + if err := http.ListenAndServe(":80", nil); err != nil { + log.Fatal(err) + } + case strings.ToUpper("CLIENT"): + address, exists := os.LookupEnv("E2EADDRESS") + if !exists { + log.Fatal("The environment variable E2EADDRESS, was not set") + } + for { + + // Connect to e2e endpoint with a second timeout + conn, err := net.DialTimeout("tcp", address+":12345", time.Second) + if err != nil { + log.Fatalf("Dial failed: %v", err.Error()) + } + _, err = conn.Write([]byte("The Grid, a digital frontier")) + if err != nil { + log.Fatalf("Write data failed: %v ", err.Error()) + } + + // buffer to get data + received := make([]byte, 1024) + _, err = conn.Read(received) + if err != nil { + log.Fatalf("Read data failed:", err.Error()) + } + + println("Received message: %s", string(received)) + + 
conn.Close() + // Wait for a second and connect again + time.Sleep(time.Second) + } + default: + log.Fatalf("Unknown mode [%s]", mode) + } + +} diff --git a/testing/e2e/e2e_suite_test.go b/testing/e2e/e2e_suite_test.go new file mode 100644 index 00000000..a9092081 --- /dev/null +++ b/testing/e2e/e2e_suite_test.go @@ -0,0 +1,25 @@ +//go:build e2e +// +build e2e + +package e2e_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + "github.com/kube-vip/kube-vip/testing/e2e" +) + +func TestE2E(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "E2E Suite") +} + +var _ = SynchronizedBeforeSuite( + func() { + e2e.EnsureKindNetwork() + }, + func() {}, +) diff --git a/testing/e2e/e2e_test.go b/testing/e2e/e2e_test.go new file mode 100644 index 00000000..8e5752fa --- /dev/null +++ b/testing/e2e/e2e_test.go @@ -0,0 +1,448 @@ +//go:build e2e +// +build e2e + +package e2e_test + +import ( + "bytes" + "crypto/tls" + "fmt" + "io/ioutil" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + "time" + + "k8s.io/klog/v2" + "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/log" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "github.com/onsi/gomega/gexec" + + "github.com/kube-vip/kube-vip/pkg/vip" + "github.com/kube-vip/kube-vip/testing/e2e" +) + +var _ = Describe("kube-vip broadcast neighbor", func() { + var ( + logger log.Logger + imagePath string + k8sImagePath string + configPath string + kubeVIPManifestTemplate *template.Template + clusterName string + tempDirPath string + v129 bool + ) + + BeforeEach(func() { + klog.SetOutput(GinkgoWriter) + logger = e2e.TestLogger{} + + imagePath = os.Getenv("E2E_IMAGE_PATH") // Path to kube-vip image + configPath = os.Getenv("CONFIG_PATH") // path to the api server config + k8sImagePath = os.Getenv("K8S_IMAGE_PATH") // path to the kubernetes image (version for kind) + if configPath == "" { + configPath = "/etc/kubernetes/admin.conf" + } + _, v129 = os.LookupEnv("V129") + curDir, err := os.Getwd() + Expect(err).NotTo(HaveOccurred()) + templatePath := filepath.Join(curDir, "kube-vip.yaml.tmpl") + + kubeVIPManifestTemplate, err = template.New("kube-vip.yaml.tmpl").ParseFiles(templatePath) + Expect(err).NotTo(HaveOccurred()) + + tempDirPath, err = ioutil.TempDir("", "kube-vip-test") + Expect(err).NotTo(HaveOccurred()) + }) + + AfterEach(func() { + if os.Getenv("E2E_PRESERVE_CLUSTER") == "true" { + return + } + + provider := cluster.NewProvider( + cluster.ProviderWithLogger(logger), + cluster.ProviderWithDocker(), + ) + + Expect(provider.Delete(clusterName, "")).To(Succeed()) + + Expect(os.RemoveAll(tempDirPath)).To(Succeed()) + }) + + Describe("kube-vip IPv4 functionality", func() { + var ( + clusterConfig kindconfigv1alpha4.Cluster + ipv4VIP string + ) + + BeforeEach(func() { + clusterName = fmt.Sprintf("%s-ipv4", filepath.Base(tempDirPath)) + + clusterConfig = kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv4Family, + }, + Nodes: []kindconfigv1alpha4.Node{}, + } + + manifestPath := filepath.Join(tempDirPath, "kube-vip-ipv4.yaml") + + for i := 0; i < 3; i++ { + nodeConfig := kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, + ExtraMounts: []kindconfigv1alpha4.Mount{ + { + HostPath: manifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", 
+ }, + }, + } + // Override the kind image version + if k8sImagePath != "" { + nodeConfig.Image = k8sImagePath + } + clusterConfig.Nodes = append(clusterConfig.Nodes, nodeConfig) + } + + manifestFile, err := os.Create(manifestPath) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile.Close() + + ipv4VIP = e2e.GenerateIPv4VIP() + + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ + ControlPlaneVIP: ipv4VIP, + ImagePath: imagePath, + ConfigPath: configPath, + })).To(Succeed()) + + if v129 { + // create a seperate manifest + manifestPath2 := filepath.Join(tempDirPath, "kube-vip-ipv4-first.yaml") + + // change the path of the mount to the new file + clusterConfig.Nodes[0].ExtraMounts[0].HostPath = manifestPath2 + + manifestFile2, err := os.Create(manifestPath2) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile2.Close() + + Expect(kubeVIPManifestTemplate.Execute(manifestFile2, e2e.KubevipManifestValues{ + ControlPlaneVIP: ipv4VIP, + ImagePath: imagePath, + ConfigPath: "/etc/kubernetes/super-admin.conf", // Change the kuberenetes file + })).To(Succeed()) + } + }) + + It("provides an IPv4 VIP address for the Kubernetes control plane nodes", func() { + go func() { + time.Sleep(30 * time.Second) + By(withTimestamp("loading local docker image to kind cluster")) + e2e.LoadDockerImageToKind(logger, imagePath, clusterName) + }() + + By(withTimestamp("creating a kind cluster with multiple control plane nodes")) + createKindCluster(logger, &clusterConfig, clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv4 VIP")) + // Allow enough time for control plane nodes to load the docker image and + // use the default timeout for establishing a connection to the VIP + assertControlPlaneIsRoutable(ipv4VIP, time.Duration(0), 20*time.Second) + + // wait for a bit + By(withTimestamp("sitting for a few seconds to hopefully allow the roles to have been created in the cluster")) + time.Sleep(30 * time.Second) + + By(withTimestamp("killing the leader Kubernetes control plane node to trigger a fail-over scenario")) + killLeader(ipv4VIP, clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are still accessible via the assigned IPv4 VIP with little downtime")) + // Allow at most 20 seconds of downtime when polling the control plane nodes + assertControlPlaneIsRoutable(ipv4VIP, 1*time.Second, 30*time.Second) + }) + }) + + Describe("kube-vip IPv6 functionality", func() { + var ( + clusterConfig kindconfigv1alpha4.Cluster + ipv6VIP string + ) + + BeforeEach(func() { + clusterName = fmt.Sprintf("%s-ipv6", filepath.Base(tempDirPath)) + + clusterConfig = kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv6Family, + }, + Nodes: []kindconfigv1alpha4.Node{}, + } + + manifestPath := filepath.Join(tempDirPath, "kube-vip-ipv6.yaml") + + for i := 0; i < 3; i++ { + nodeConfig := kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, + ExtraMounts: []kindconfigv1alpha4.Mount{ + { + HostPath: manifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + }, + }, + } + // Override the kind image version + if k8sImagePath != "" { + nodeConfig.Image = k8sImagePath + } + clusterConfig.Nodes = append(clusterConfig.Nodes, nodeConfig) + } + + ipv6VIP = e2e.GenerateIPv6VIP() + + manifestFile, err := os.Create(manifestPath) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile.Close() + + 
Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ + ControlPlaneVIP: ipv6VIP, + ImagePath: imagePath, + ConfigPath: configPath, + })).To(Succeed()) + + if v129 { + // create a seperate manifest + manifestPath2 := filepath.Join(tempDirPath, "kube-vip-ipv6-first.yaml") + + // change the path of the mount to the new file + clusterConfig.Nodes[0].ExtraMounts[0].HostPath = manifestPath2 + + manifestFile2, err := os.Create(manifestPath2) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile2.Close() + + Expect(kubeVIPManifestTemplate.Execute(manifestFile2, e2e.KubevipManifestValues{ + ControlPlaneVIP: ipv6VIP, + ImagePath: imagePath, + ConfigPath: "/etc/kubernetes/super-admin.conf", // Change the kuberenetes file + })).To(Succeed()) + } + }) + + It("provides an IPv6 VIP address for the Kubernetes control plane nodes", func() { + go func() { + time.Sleep(30 * time.Second) + By(withTimestamp("loading local docker image to kind cluster")) + e2e.LoadDockerImageToKind(logger, imagePath, clusterName) + }() + + By(withTimestamp("creating a kind cluster with multiple control plane nodes")) + createKindCluster(logger, &clusterConfig, clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv6 VIP")) + // Allow enough time for control plane nodes to load the docker image and + // use the default timeout for establishing a connection to the VIP + assertControlPlaneIsRoutable(ipv6VIP, time.Duration(0), 20*time.Second) + + // wait for a bit + By(withTimestamp("sitting for a few seconds to hopefully allow the roles to have been created in the cluster")) + time.Sleep(30 * time.Second) + + By(withTimestamp("killing the leader Kubernetes control plane node to trigger a fail-over scenario")) + killLeader(ipv6VIP, clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are still accessible via the assigned IPv6 VIP with little downtime")) + // Allow at most 20 seconds of downtime when polling the control plane nodes + assertControlPlaneIsRoutable(ipv6VIP, 1*time.Second, 30*time.Second) + }) + }) + + Describe("kube-vip DualStack functionality", func() { + var ( + clusterConfig kindconfigv1alpha4.Cluster + dualstackVIP string + ) + + BeforeEach(func() { + clusterName = fmt.Sprintf("%s-dualstack", filepath.Base(tempDirPath)) + + clusterConfig = kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.DualStackFamily, + }, + Nodes: []kindconfigv1alpha4.Node{}, + } + + manifestPath := filepath.Join(tempDirPath, "kube-vip-dualstack.yaml") + + for i := 0; i < 3; i++ { + nodeConfig := kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, + ExtraMounts: []kindconfigv1alpha4.Mount{ + { + HostPath: manifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + }, + }, + } + // Override the kind image version + if k8sImagePath != "" { + nodeConfig.Image = k8sImagePath + } + clusterConfig.Nodes = append(clusterConfig.Nodes, nodeConfig) + } + + dualstackVIP = e2e.GenerateDualStackVIP() + + manifestFile, err := os.Create(manifestPath) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile.Close() + + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ + ControlPlaneVIP: dualstackVIP, + ImagePath: imagePath, + ConfigPath: configPath, + })).To(Succeed()) + + if v129 { + // create a seperate manifest + manifestPath2 := filepath.Join(tempDirPath, "kube-vip-ipv6-first.yaml") + + // change the path of the 
mount to the new file + clusterConfig.Nodes[0].ExtraMounts[0].HostPath = manifestPath2 + + manifestFile2, err := os.Create(manifestPath2) + Expect(err).NotTo(HaveOccurred()) + + defer manifestFile2.Close() + + Expect(kubeVIPManifestTemplate.Execute(manifestFile2, e2e.KubevipManifestValues{ + ControlPlaneVIP: dualstackVIP, + ImagePath: imagePath, + ConfigPath: "/etc/kubernetes/super-admin.conf", // Change the kuberenetes file + })).To(Succeed()) + } + }) + + It("provides an DualStack VIP addresses for the Kubernetes control plane nodes", func() { + go func() { + time.Sleep(30 * time.Second) + By(withTimestamp("loading local docker image to kind cluster")) + e2e.LoadDockerImageToKind(logger, imagePath, clusterName) + }() + + vips := vip.GetIPs(dualstackVIP) + + By(withTimestamp("creating a kind cluster with multiple control plane nodes")) + createKindCluster(logger, &clusterConfig, clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv6 VIP")) + // Allow enough time for control plane nodes to load the docker image and + // use the default timeout for establishing a connection to the VIP + assertControlPlaneIsRoutable(vips[1], time.Duration(0), 20*time.Second) + + By(withTimestamp("checking that the Kubernetes control plane nodes are accessible via the assigned IPv4 VIP")) + // Allow enough time for control plane nodes to load the docker image and + // use the default timeout for establishing a connection to the VIP + assertControlPlaneIsRoutable(vips[0], time.Duration(0), 20*time.Second) + + // wait for a bit + By(withTimestamp("sitting for a few seconds to hopefully allow the roles to have been created in the cluster")) + time.Sleep(30 * time.Second) + + By(withTimestamp("killing the leader Kubernetes control plane node to trigger a fail-over scenario")) + killLeader(vips[1], clusterName) + + By(withTimestamp("checking that the Kubernetes control plane nodes are still accessible via the assigned IPv6 VIP with little downtime")) + // Allow at most 20 seconds of downtime when polling the control plane nodes + assertControlPlaneIsRoutable(vips[1], 1*time.Second, 30*time.Second) + + By(withTimestamp("checking that the Kubernetes control plane nodes are still accessible via the assigned IPv4 VIP with little downtime")) + // Allow at most 20 seconds of downtime when polling the control plane nodes + assertControlPlaneIsRoutable(vips[0], 1*time.Second, 30*time.Second) + }) + }) +}) + +func createKindCluster(logger log.Logger, config *v1alpha4.Cluster, clusterName string) { + provider := cluster.NewProvider( + cluster.ProviderWithLogger(logger), + cluster.ProviderWithDocker(), + ) + format.UseStringerRepresentation = true // Otherwise error stacks have binary format. 
+	Expect(provider.Create(
+		clusterName,
+		cluster.CreateWithV1Alpha4Config(config),
+		cluster.CreateWithRetain(os.Getenv("E2E_PRESERVE_CLUSTER") == "true"), // If create fails, we'll need the cluster alive to debug
+	)).To(Succeed())
+}
+
+func assertControlPlaneIsRoutable(controlPlaneVIP string, transportTimeout, eventuallyTimeout time.Duration) {
+	if strings.Contains(controlPlaneVIP, ":") {
+		controlPlaneVIP = fmt.Sprintf("[%s]", controlPlaneVIP)
+	}
+
+	transport := &http.Transport{
+		TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, // nolint
+	}
+	client := &http.Client{Transport: transport, Timeout: transportTimeout}
+	Eventually(func() int {
+		resp, _ := client.Get(fmt.Sprintf("https://%s:6443/livez", controlPlaneVIP))
+		if resp == nil {
+			return -1
+		}
+		defer resp.Body.Close()
+		return resp.StatusCode
+	}, eventuallyTimeout).Should(Equal(http.StatusOK), "Failed to connect to VIP")
+}
+
+func killLeader(leaderIPAddr string, clusterName string) {
+	dockerControlPlaneContainerNames := []string{
+		fmt.Sprintf("%s-control-plane", clusterName),
+		fmt.Sprintf("%s-control-plane2", clusterName),
+		fmt.Sprintf("%s-control-plane3", clusterName),
+	}
+	var leaderName string
+	for _, name := range dockerControlPlaneContainerNames {
+		cmdOut := new(bytes.Buffer)
+		cmd := exec.Command(
+			"docker", "exec", name, "ip", "addr",
+		)
+		cmd.Stdout = cmdOut
+		Eventually(cmd.Run(), "600s").Should(Succeed())
+
+		if strings.Contains(cmdOut.String(), leaderIPAddr) {
+			leaderName = name
+			break
+		}
+	}
+	Expect(leaderName).ToNot(BeEmpty())
+
+	cmd := exec.Command(
+		"docker", "kill", leaderName,
+	)
+
+	session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
+	Expect(err).NotTo(HaveOccurred())
+	Eventually(session, "5s").Should(gexec.Exit(0))
+}
+
+func withTimestamp(text string) string {
+	return fmt.Sprintf("%s: %s", time.Now(), text)
+}
diff --git a/testing/e2e/etcd/README.md b/testing/e2e/etcd/README.md
new file mode 100644
index 00000000..d7e0f3c5
--- /dev/null
+++ b/testing/e2e/etcd/README.md
@@ -0,0 +1,32 @@
+# Running etcd Tests
+## Prerequisites:
+* Docker
+
+If you want to use an image that only exists in your local docker cache, use this env var (modify registry and tag accordingly):
+```sh
+export E2E_IMAGE_PATH=plndr/kube-vip:v0.6.2
+```
+
+If you want to preserve the etcd nodes after a test run, use the following:
+```sh
+export E2E_PRESERVE_CLUSTER=true
+```
+
+Note that you'll need to delete the nodes before you can run the test again; this is only for debugging. You can use `kind delete cluster` or just `docker rm` the containers.
+
+To run the tests:
+```sh
+ginkgo -vv --tags=e2e testing/e2e/etcd
+
+```
+
+The E2E tests:
+1. Start 3 kind nodes (using docker)
+2. Load the local docker image into kind
+3. Init the etcd cluster and join all nodes
+4. Verify the etcd API can be accessed through the VIP
+    1. This proves leader election through etcd in kube-vip is working.
+5. Remove the first node (which is probably the VIP leader)
+6. Verify the etcd API can be accessed through the VIP again
+
+> Note: this has only been tested on Linux but it might work on Mac
\ No newline at end of file
diff --git a/testing/e2e/etcd/cluster.go b/testing/e2e/etcd/cluster.go
new file mode 100644
index 00000000..0cf68673
--- /dev/null
+++ b/testing/e2e/etcd/cluster.go
@@ -0,0 +1,341 @@
+//go:build e2e
+// +build e2e
+
+package etcd
+
+import (
+	"context"
+	"path/filepath"
+	"strings"
+	"time"
+
+	.
"github.com/onsi/gomega" + "go.etcd.io/etcd/client/pkg/v3/transport" + clientv3 "go.etcd.io/etcd/client/v3" + "golang.org/x/exp/slices" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + + "github.com/kube-vip/kube-vip/testing/e2e" +) + +type ClusterSpec struct { + Nodes int + Name string + VIP string + KubeVIPImage string + KubeVIPpManifestPath string + KubeletManifestPath string + KubeletFlagsPath string + EtcdCertsFolder string + Logger e2e.TestLogger +} + +type Cluster struct { + *ClusterSpec + Nodes []nodes.Node + + provider *cluster.Provider +} + +func CreateCluster(ctx context.Context, spec *ClusterSpec) *Cluster { + c := &Cluster{ + ClusterSpec: spec, + } + + c.provider = cluster.NewProvider( + cluster.ProviderWithLogger(spec.Logger), + cluster.ProviderWithDocker(), + ) + + c.Logger.Printf("Creating kind nodes") + c.initKindCluster() + + c.Logger.Printf("Loading kube-vip image into nodes") + e2e.LoadDockerImageToKind(spec.Logger, spec.KubeVIPImage, spec.Name) + + c.Logger.Printf("Starting etcd cluster") + c.initEtcd(ctx) + + c.Logger.Printf("Checking 1 node etcd is available through VIP") + c.VerifyEtcdThroughVIP(ctx, 15*time.Second) + + c.Logger.Printf("Adding the rest of the nodes to the etcd cluster") + c.joinRestOfNodes(ctx) + + c.Logger.Printf("Checking health for all nodes") + for _, node := range c.Nodes { + c.expectEtcdNodeHealthy(ctx, node, 15*time.Second) + } + + c.Logger.Printf("Checking %d nodes etcd is available through VIP", c.ClusterSpec.Nodes) + c.VerifyEtcdThroughVIP(ctx, 15*time.Second) + + return c +} + +func (c *Cluster) initKindCluster() { + kindCluster := &kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv4Family, + }, + } + + for i := 0; i < c.ClusterSpec.Nodes; i++ { + kindCluster.Nodes = append(kindCluster.Nodes, kindconfigv1alpha4.Node{ + Role: kindconfigv1alpha4.ControlPlaneRole, + ExtraMounts: []kindconfigv1alpha4.Mount{ + { + HostPath: c.ClusterSpec.KubeVIPpManifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + }, + { + HostPath: c.ClusterSpec.KubeletManifestPath, + ContainerPath: "/var/lib/kubelet/config.yaml", + }, + { + HostPath: c.ClusterSpec.KubeletFlagsPath, + ContainerPath: "/etc/default/kubelet", + }, + }, + }) + } + + Expect(c.provider.Create( + c.Name, + cluster.CreateWithV1Alpha4Config(kindCluster), + cluster.CreateWithRetain(true), + cluster.CreateWithStopBeforeSettingUpKubernetes(true), + cluster.CreateWithWaitForReady(2*time.Minute), + cluster.CreateWithNodeImage("public.ecr.aws/eks-anywhere/kubernetes-sigs/kind/node:v1.26.7-eks-d-1-26-16-eks-a-47"), + )).To(Succeed()) +} + +func (c *Cluster) initEtcd(ctx context.Context) { + var err error + c.Nodes, err = c.provider.ListInternalNodes(c.Name) + slices.SortFunc(c.Nodes, func(a, b nodes.Node) int { + aName := a.String() + bName := b.String() + if aName < bName { + return 1 + } else if aName > bName { + return -1 + } + + return 0 + }) + + Expect(err).NotTo(HaveOccurred()) + firstNode := c.Nodes[0] + + createCerts(firstNode) + + // We need to run all phases individually to be able to re-run the health phase + // In CI it can take longer than the 30 seconds timeout that is hardcoded in etcdadm + // If etcdadm added the option to configure this timeout, we could change this to just + // call etcdadm init to run all phases. 
+ + flags := []string{ + "--init-system", "kubelet", + "--certs-dir", "/etc/kubernetes/pki/etcd", + "--server-cert-extra-sans", strings.Join([]string{"etcd", c.VIP, e2e.NodeIPv4(firstNode)}, ","), + "--version", "3.5.8-eks-1-26-16", + "--image-repository", "public.ecr.aws/eks-distro/etcd-io/etcd", + } + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "install", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "certificates", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "snapshot", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "configure", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "start", flags)..., + ) + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "etcdctl", flags)..., + ) + + Eventually(func() error { + return runInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "health", flags)..., + ) + }, 3*time.Minute).Should(Succeed(), "etcd should become healthy in node in less than 3 minutes") + + e2e.RunInNode(firstNode, + "etcdadm", + initArgsForPhase("init", "post-init-instructions", flags)..., + ) + + bindEtcdListenerToAllIPs(firstNode) + + e2e.CopyFolderFromNodeToDisk(firstNode, "/etc/kubernetes/pki/etcd", c.EtcdCertsFolder) + + c.expectEtcdNodeHealthy(ctx, firstNode, 15*time.Second) +} + +func runInNode(node nodes.Node, command string, args ...string) error { + return e2e.PrintCommandOutputIfErr(node.Command(command, args...).Run()) +} + +func initArgsForPhase(command, phaseName string, flags []string) []string { + c := make([]string, 0, 3+len(flags)) + c = append(c, command, "phase", phaseName) + c = append(c, flags...) + return c +} + +func (c *Cluster) joinRestOfNodes(ctx context.Context) { + for _, node := range c.Nodes[1:] { + c.joinNode(ctx, c.Nodes[0], node) + } +} + +func (c *Cluster) joinNode(ctx context.Context, firstNode, node nodes.Node) { + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/ca.crt") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/ca.key") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/etcd/ca.crt") + nodeutils.CopyNodeToNode(firstNode, node, "/etc/kubernetes/pki/etcd/ca.key") + + e2e.RunInNode(node, + "etcdadm", + "join", + "https://"+e2e.NodeIPv4(firstNode)+":2379", + "--init-system", "kubelet", + "--certs-dir", "/etc/kubernetes/pki/etcd", + "--server-cert-extra-sans", strings.Join([]string{"etcd", c.VIP, e2e.NodeIPv4(node)}, ","), + "--version", "3.5.8-eks-1-26-16", + "--image-repository", "public.ecr.aws/eks-distro/etcd-io/etcd", + ) + + bindEtcdListenerToAllIPs(node) + + c.expectEtcdNodeHealthy(ctx, node, 30*time.Second) +} + +func (c *Cluster) DeleteEtcdMember(ctx context.Context, toDelete, toKeep nodes.Node) { + // point client to the node we are keeping because we are going to use it to remove the other node + // and vip is possibly pointing to that node + client := c.newEtcdClient(e2e.NodeIPv4(toDelete)) + defer client.Close() + members, err := client.MemberList(ctx) + Expect(err).NotTo(HaveOccurred()) + c.Logger.Printf("Members: %v", members.Members) + + nodeName := toDelete.String() + for _, m := range members.Members { + if m.Name == nodeName { + c.Logger.Printf("Removing node %s with memberID %d", m.Name, m.ID) + // We need to retry this request because etcd will reject it if the + // server doesn't have recent connections to enough active members + // to protect the quorum. 
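+			// For example, with members = 3 the threshold is 1 + (3-1)/2 = 2, so the
+			// removal is only accepted while active - 1 >= 2, i.e. all three members
+			// still have active connections. The condition being retried is: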
(active - 1) >= 1+((members-1)/2) + Eventually(func() error { + _, err := client.MemberRemove(ctx, m.ID) + return err + }).WithPolling(time.Second).WithTimeout(10*time.Second).Should( + Succeed(), "removing member should succeed once all members have connections to each other", + ) + + break + } + } + + e2e.DeleteNodes(toDelete) +} + +func (c *Cluster) Delete() { + Expect(c.provider.Delete(c.Name, "")).To(Succeed()) +} + +func startKubeletForEtcd(node nodes.Node) { + e2e.RunInNode(node, + "kubeadm", "init", "phase", "kubeconfig", "admin", "--config", "/kind/kubeadm.conf", + ) + e2e.RunInNode(node, + "kubeadm", "init", "phase", "kubelet-start", "--config", "/kind/kubeadm.conf", + ) +} + +func createCerts(node nodes.Node) { + e2e.RunInNode(node, + "kubeadm", + "init", + "phase", "certs", "ca", "--config", "/kind/kubeadm.conf", + ) + + e2e.RunInNode(node, + "kubeadm", + "init", + "phase", "certs", "etcd-ca", "--config", "/kind/kubeadm.conf", + ) +} + +func bindEtcdListenerToAllIPs(node nodes.Node) { + // There is no easy way to make etcdadm configure etcd to bind to 0.0.0.0 + // so we just manually update the manifest after it's created and restart it + // We want to listen in 0.0.0.0 so our kube-vip can connect to it. + e2e.RunInNode(node, + "sed", "-i", `s/https:\/\/.*,https:\/\/127.0.0.1:2379/https:\/\/0.0.0.0:2379/g`, "/etc/kubernetes/manifests/etcd.manifest", + ) + + e2e.StopPodInNode(node, "etcd") + + e2e.RunInNode(node, + "systemctl", "restart", "kubelet", + ) +} + +func (c *Cluster) newEtcdClient(serverIPs ...string) *clientv3.Client { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: filepath.Join(c.EtcdCertsFolder, "ca.crt"), + CertFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.crt"), + KeyFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.key"), + } + + clientTLS, err := tlsInfo.ClientConfig() + Expect(err).NotTo(HaveOccurred()) + + endpoints := make([]string, 0, len(serverIPs)) + for _, ip := range serverIPs { + endpoints = append(endpoints, ip+":2379") + } + + client, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + TLS: clientTLS, + DialTimeout: time.Second, + }) + Expect(err).NotTo(HaveOccurred()) + + return client +} + +func (c *Cluster) VerifyEtcdThroughVIP(ctx context.Context, timeout time.Duration) { + etcdClient := c.newEtcdClient(c.VIP) + defer etcdClient.Close() + rCtx, cancel := context.WithTimeout(ctx, timeout) + _, err := etcdClient.MemberList(rCtx) + Expect(err).NotTo(HaveOccurred()) + cancel() +} diff --git a/testing/e2e/etcd/election_test.go b/testing/e2e/etcd/election_test.go new file mode 100644 index 00000000..c415d383 --- /dev/null +++ b/testing/e2e/etcd/election_test.go @@ -0,0 +1,113 @@ +//go:build e2e +// +build e2e + +package etcd_test + +import ( + "context" + "os" + "path/filepath" + "text/template" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/format" + "k8s.io/klog/v2" + + "github.com/kube-vip/kube-vip/testing/e2e" + "github.com/kube-vip/kube-vip/testing/e2e/etcd" +) + +type testConfig struct { + logger e2e.TestLogger + kubeVipImage string + kubeVipManifestPath string + clusterName string + vip string + etcdCertsFolder string + currentDir string + cluster *etcd.Cluster +} + +func (t *testConfig) cleanup() { + if os.Getenv("E2E_PRESERVE_CLUSTER") == "true" { + return + } + + t.cluster.Delete() + Expect(os.RemoveAll(t.kubeVipManifestPath)).To(Succeed()) + Expect(os.RemoveAll(t.etcdCertsFolder)).To(Succeed()) +} + +var _ = Describe("kube-vip with etcd leader election", func() { + ctx := context.Background() + test := &testConfig{} + + AfterEach(func() { + test.cleanup() + }) + + BeforeEach(func() { + By("configuring test", func() { + var err error + format.UseStringerRepresentation = true // Otherwise error stacks have binary format. + klog.SetOutput(GinkgoWriter) + + test.clusterName = "kube-vip-etcd-test" // this needs to unique per it block + test.logger = e2e.TestLogger{} + test.etcdCertsFolder = "certs" + + test.kubeVipImage = os.Getenv("E2E_IMAGE_PATH") + + test.vip = e2e.GenerateIPv4VIP() + test.logger.Printf("Selected VIP %s", test.vip) + + test.currentDir, err = os.Getwd() + Expect(err).NotTo(HaveOccurred()) + + tempDirPath, err := os.MkdirTemp("", "kube-vip-test") + Expect(err).NotTo(HaveOccurred()) + + test.kubeVipManifestPath = filepath.Join(tempDirPath, "etcd-vip-ipv4.yaml") + manifestFile, err := os.Create(test.kubeVipManifestPath) + Expect(err).NotTo(HaveOccurred()) + defer manifestFile.Close() + + templatePath := filepath.Join(test.currentDir, "kube-etcd-vip.yaml.tmpl") + kubeVIPManifestTemplate, err := template.New("kube-etcd-vip.yaml.tmpl").ParseFiles(templatePath) + Expect(err).NotTo(HaveOccurred()) + Expect(kubeVIPManifestTemplate.Execute(manifestFile, e2e.KubevipManifestValues{ + ControlPlaneVIP: test.vip, + ImagePath: test.kubeVipImage, + })).To(Succeed()) + }) + + By("creating etcd cluster", func() { + spec := &etcd.ClusterSpec{ + Name: test.clusterName, + Nodes: 2, + VIP: test.vip, + KubeVIPImage: test.kubeVipImage, + KubeVIPpManifestPath: test.kubeVipManifestPath, + KubeletManifestPath: filepath.Join(test.currentDir, "kubelet.yaml"), + KubeletFlagsPath: filepath.Join(test.currentDir, "kubelet-flags.env"), + EtcdCertsFolder: filepath.Join(test.currentDir, test.etcdCertsFolder), + Logger: test.logger, + } + + test.cluster = etcd.CreateCluster(ctx, spec) + }) + }) + + When("an etcd node is removed", func() { + It("elects a new kube-vip leader and provides a VIP to the second node", func() { + By("removing as member and killing the first node", func() { + test.cluster.DeleteEtcdMember(ctx, test.cluster.Nodes[0], test.cluster.Nodes[1]) + }) + By("verifying etcd is up and accessible through the vip", func() { + test.cluster.VerifyEtcdThroughVIP(ctx, 40*time.Second) + }) + }) + }) +}) diff --git a/testing/e2e/etcd/etcd_suite_test.go b/testing/e2e/etcd/etcd_suite_test.go new file mode 100644 index 00000000..365a09df --- /dev/null +++ b/testing/e2e/etcd/etcd_suite_test.go @@ -0,0 +1,25 @@ +//go:build e2e +// +build e2e + +package etcd_test + +import ( + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "github.com/kube-vip/kube-vip/testing/e2e" +) + +func TestEtcd(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Etcd Suite") +} + +var _ = SynchronizedBeforeSuite( + func() { + e2e.EnsureKindNetwork() + }, + func() {}, +) diff --git a/testing/e2e/etcd/health.go b/testing/e2e/etcd/health.go new file mode 100644 index 00000000..8cbda7ea --- /dev/null +++ b/testing/e2e/etcd/health.go @@ -0,0 +1,121 @@ +//go:build e2e +// +build e2e + +package etcd + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "path/filepath" + "time" + + "github.com/kube-vip/kube-vip/testing/e2e" + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "go.etcd.io/etcd/client/pkg/v3/transport" + "sigs.k8s.io/kind/pkg/cluster/nodes" +) + +func (c *Cluster) expectEtcdNodeHealthy(ctx context.Context, node nodes.Node, timeout time.Duration) { + httpClient := c.newEtcdHTTPClient() + client := c.newEtcdClient(e2e.NodeIPv4(node)) + nodeEtcdEndpoint := etcdEndpointForNode(node) + Eventually(func(g Gomega) error { + health, err := getEtcdHealth(httpClient, node) + g.Expect(err).NotTo(HaveOccurred()) + if !health.Healthy() { + c.Logger.Printf("Member %s is not healthy with reason: %s", node.String(), health.Reason) + } + g.Expect(health.Healthy()).To(BeTrue(), "member is not healthy with reason: %s", health.Reason) + statusCtx, statusCancel := context.WithTimeout(ctx, 2*time.Second) + defer statusCancel() + status, err := client.Status(statusCtx, nodeEtcdEndpoint) + g.Expect(err).NotTo(HaveOccurred()) + + g.Expect(status.Errors).To(BeEmpty(), "member should not have any errors in status") + g.Expect(status.IsLearner).To(BeFalse(), "member should not be a learner") + + alarmsCtx, alarmsCancel := context.WithTimeout(ctx, 2*time.Second) + defer alarmsCancel() + alarms, err := client.AlarmList(alarmsCtx) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(alarms.Alarms).To(BeEmpty(), "cluster should not have any alarms") + + return nil + }, timeout).Should(Succeed(), "node %s should eventually be healthy", node.String()) +} + +func (c *Cluster) newEtcdHTTPClient() *http.Client { + tlsInfo := transport.TLSInfo{ + TrustedCAFile: filepath.Join(c.EtcdCertsFolder, "ca.crt"), + CertFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.crt"), + KeyFile: filepath.Join(c.EtcdCertsFolder, "etcdctl-etcd-client.key"), + } + + clientTLS, err := tlsInfo.ClientConfig() + Expect(err).NotTo(HaveOccurred()) + + return &http.Client{ + Timeout: 2 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: clientTLS, + }, + } +} + +type etcdHealthCheckResponse struct { + Health string `json:"health"` + Reason string `json:"reason"` +} + +func (h *etcdHealthCheckResponse) Healthy() bool { + return h.Health == "true" +} + +func getEtcdHealth(c *http.Client, node nodes.Node) (*etcdHealthCheckResponse, error) { + req, err := http.NewRequest("GET", etcdHealthEndpoint(node), nil) + if err != nil { + return nil, err + } + + resp, err := c.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, errors.Wrapf(err, "etcd member not ready, returned http status %d", resp.StatusCode) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + health, err := parseEtcdHealthResponse(body) + if err != nil { + return nil, err + } + + return health, nil +} + +func etcdEndpointForNode(node nodes.Node) string { + return e2e.NodeIPv4(node) + ":2379" +} + +func etcdHealthEndpoint(node nodes.Node) string { + 
return fmt.Sprintf("https://%s:2379/health", e2e.NodeIPv4(node)) +} + +func parseEtcdHealthResponse(data []byte) (*etcdHealthCheckResponse, error) { + obj := &etcdHealthCheckResponse{} + if err := json.Unmarshal(data, obj); err != nil { + return nil, err + } + return obj, nil +} diff --git a/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl b/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl new file mode 100644 index 00000000..a5257c91 --- /dev/null +++ b/testing/e2e/etcd/kube-etcd-vip.yaml.tmpl @@ -0,0 +1,51 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + args: + - manager + - --leaderElectionType=etcd + - --etcdCACert=/etc/kubernetes/pki/etcd/ca.crt + - --etcdCert=/etc/kubernetes/pki/etcd/server.crt + - --etcdKey=/etc/kubernetes/pki/etcd/server.key + - --etcdEndpoints=127.0.0.1:2379 + env: + - name: vip_arp + value: "true" + - name: vip_interface + value: eth0 + - name: vip_leaderelection + value: "true" + - name: address + value: "{{ .ControlPlaneVIP }}" + - name: vip_leaseduration + value: "2" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: cp_enable + value: "true" + - name: vip_loglevel + value: "5" + image: "{{ .ImagePath }}" + imagePullPolicy: Never + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + # force kube-vip to use CP ip from admin.conf instead of localhost + - mountPath: /etc/kubernetes/pki/etcd + name: etcd-certs + hostNetwork: true + volumes: + - hostPath: + path: /etc/kubernetes/pki/etcd + name: etcd-certs diff --git a/testing/e2e/etcd/kubelet-flags.env b/testing/e2e/etcd/kubelet-flags.env new file mode 100644 index 00000000..12833606 --- /dev/null +++ b/testing/e2e/etcd/kubelet-flags.env @@ -0,0 +1 @@ +KUBELET_EXTRA_ARGS="--kubeconfig='' --bootstrap-kubeconfig='' --container-runtime-endpoint=unix:///run/containerd/containerd.sock --node-labels= --pod-infra-container-image=registry.k8s.io/pause:3.9" \ No newline at end of file diff --git a/testing/e2e/etcd/kubelet.yaml b/testing/e2e/etcd/kubelet.yaml new file mode 100644 index 00000000..952a250d --- /dev/null +++ b/testing/e2e/etcd/kubelet.yaml @@ -0,0 +1,26 @@ +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +authentication: + anonymous: + enabled: true + webhook: + enabled: false +authorization: + mode: AlwaysAllow +enableServer: false +logging: + flushFrequency: 0 + options: + json: + infoBufferSize: "0" + verbosity: 0 +podCIDR: 10.241.1.0/24 +staticPodPath: /etc/kubernetes/manifests +cgroupDriver: systemd +cgroupRoot: /kubelet +failSwapOn: false +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/testing/e2e/ip.go b/testing/e2e/ip.go new file mode 100644 index 00000000..ad2cc70d --- /dev/null +++ b/testing/e2e/ip.go @@ -0,0 +1,133 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" + "net" + "os/exec" + "strings" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/onsi/gomega/gexec" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" +) + +func EnsureKindNetwork() { + By("checking if the Docker \"kind\" network exists") + cmd := exec.Command("docker", "inspect", "kind") + session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + Eventually(session).Should(gexec.Exit()) + if session.ExitCode() == 0 { + return + } + + By("Docker \"kind\" network was not found. Creating dummy Kind cluster to ensure creation") + clusterConfig := kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv6Family, + }, + } + + provider := cluster.NewProvider( + cluster.ProviderWithDocker(), + ) + dummyClusterName := fmt.Sprintf("dummy-cluster-%d", time.Now().Unix()) + Expect(provider.Create( + dummyClusterName, + cluster.CreateWithV1Alpha4Config(&clusterConfig), + )).To(Succeed()) + + By("deleting dummy Kind cluster") + Expect(provider.Delete(dummyClusterName, "")) + + By("checking if the Docker \"kind\" network was successfully created") + cmd = exec.Command("docker", "inspect", "kind") + session, err = gexec.Start(cmd, GinkgoWriter, GinkgoWriter) + Expect(err).NotTo(HaveOccurred()) + Eventually(session).Should(gexec.Exit(0)) +} + +func GenerateIPv6VIP() string { + cidrs := getKindNetworkSubnetCIDRs() + + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + Expect(parseErr).NotTo(HaveOccurred()) + + if ip.To4() == nil { + lowerMask := binary.BigEndian.Uint64(ipNet.Mask[8:]) + lowerStart := binary.BigEndian.Uint64(ipNet.IP[8:]) + lowerEnd := (lowerStart & lowerMask) | (^lowerMask) + + chosenVIP := make([]byte, 16) + // Copy upper half into chosenVIP + copy(chosenVIP, ipNet.IP[0:8]) + // Copy lower half into chosenVIP + binary.BigEndian.PutUint64(chosenVIP[8:], lowerEnd-5) + return net.IP(chosenVIP).String() + } + } + Fail("Could not find any IPv6 CIDRs in the Docker \"kind\" network") + return "" +} + +func GenerateIPv4VIP() string { + cidrs := getKindNetworkSubnetCIDRs() + + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + Expect(parseErr).NotTo(HaveOccurred()) + + if ip.To4() != nil { + mask := binary.BigEndian.Uint32(ipNet.Mask) + start := binary.BigEndian.Uint32(ipNet.IP) + end := (start & mask) | (^mask) + + chosenVIP := make([]byte, 4) + binary.BigEndian.PutUint32(chosenVIP, end-5) + return net.IP(chosenVIP).String() + } + } + Fail("Could not find any IPv4 CIDRs in the Docker \"kind\" network") + return "" +} + +func GenerateDualStackVIP() string { + return GenerateIPv4VIP() + "," + GenerateIPv6VIP() +} + +func getKindNetworkSubnetCIDRs() []string { + cmd := exec.Command( + "docker", "inspect", "kind", + "--format", `{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}`, + ) + cmdOut := new(bytes.Buffer) + cmd.Stdout = cmdOut + Expect(cmd.Run()).To(Succeed(), "The Docker \"kind\" network was not found.") + reader := bufio.NewReader(cmdOut) + + cidrs := []string{} + for { + line, readErr := reader.ReadString('\n') + if readErr != nil && readErr != io.EOF { + Expect(readErr).NotTo(HaveOccurred(), "Error finding subnet CIDRs in the Docker \"kind\" network") + } + + cidrs = append(cidrs, strings.TrimSpace(line)) + if readErr == io.EOF { + break + } + } + + return cidrs +} diff --git a/testing/e2e/kind.go b/testing/e2e/kind.go new file mode 100644 index 00000000..59f0798d --- /dev/null +++ b/testing/e2e/kind.go @@ -0,0 +1,147 @@ 
+//go:build e2e +// +build e2e + +package e2e + +import ( + "bufio" + "bytes" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/gomega" + "github.com/pkg/errors" + "k8s.io/klog/v2" + "sigs.k8s.io/kind/pkg/cluster/nodes" + "sigs.k8s.io/kind/pkg/cluster/nodeutils" + "sigs.k8s.io/kind/pkg/cmd" + load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image" + "sigs.k8s.io/kind/pkg/exec" + kindlog "sigs.k8s.io/kind/pkg/log" +) + +func DeleteNodes(n ...nodes.Node) { + Expect(deleteNodes(n...)).To(Succeed()) +} + +func deleteNodes(n ...nodes.Node) error { + if len(n) == 0 { + return nil + } + const command = "docker" + args := make([]string, 0, len(n)+3) // allocate once + args = append(args, + "rm", + "-f", // force the container to be delete now + "-v", // delete volumes + ) + for _, node := range n { + args = append(args, node.String()) + } + if err := exec.Command(command, args...).Run(); err != nil { + return errors.Wrap(err, "failed to delete nodes") + } + return nil +} + +func NodeIPv4(node nodes.Node) string { + ip, _, err := node.IP() + Expect(err).NotTo(HaveOccurred()) + return ip +} + +func LoadDockerImageToKind(logger kindlog.Logger, imagePath, clusterName string) { + loadImageCmd := load.NewCommand(logger, cmd.StandardIOStreams()) + loadImageCmd.SetArgs([]string{"--name", clusterName, imagePath}) + Expect(loadImageCmd.Execute()).To(Succeed()) +} + +func RunInNode(node nodes.Node, command string, args ...string) { + Expect(PrintCommandOutputIfErr( + node.Command(command, args...).Run(), + )).To(Succeed()) +} + +func StopPodInNode(node nodes.Node, containerName string) { + RunInNode(node, + "bash", "-c", + fmt.Sprintf( + "crictl pods --output json --name %s-%s | jq -r \".items[0].id\" | xargs crictl stopp", + containerName, + node.String(), + ), + ) +} + +func CopyFromNodeToDisk(node nodes.Node, org, dst string) { + dstFile, err := os.Create(dst) + Expect(err).NotTo(HaveOccurred()) + defer dstFile.Close() + + Expect(node.Command("cat", org).SetStdout(dstFile).Run()).To(Succeed()) +} + +func CopyFolderFromNodeToDisk(node nodes.Node, org, dst string) { + Expect(os.MkdirAll(dst, 0o755)).To(Succeed()) + + for _, file := range filesInNodeFolder(node, org) { + CopyFromNodeToDisk(node, file, filepath.Join(dst, filepath.Base(file))) + } +} + +func CopyFolderFromNodeToNode(org, dst nodes.Node, folder string) { + for _, folder := range foldersInNodeFolder(org, folder) { + CopyFolderFromNodeToNode(org, dst, folder) + } + + for _, file := range filesInNodeFolder(org, folder) { + Expect(nodeutils.CopyNodeToNode(org, dst, file)).To(Succeed()) + } +} + +func filesInNodeFolder(node nodes.Node, folder string) []string { + return commandOutputInLines( + node, + "find", folder, "-maxdepth", "1", "-mindepth", "1", "-type", "f", + ) +} + +func foldersInNodeFolder(node nodes.Node, folder string) []string { + return commandOutputInLines( + node, + "find", folder, "-maxdepth", "1", "-mindepth", "1", "-type", "d", + ) +} + +func commandOutputInLines(node nodes.Node, command string, args ...string) []string { + var linesB bytes.Buffer + Expect(node.Command( + command, args..., + ).SetStdout(&linesB).Run()).To(Succeed()) + + var lines []string + scanner := bufio.NewScanner(&linesB) + for scanner.Scan() { + if l := scanner.Text(); l != "" { + lines = append(lines, l) + } + } + Expect(scanner.Err()).To(Succeed()) + + return lines +} + +func PrintCommandOutputIfErr(err error) error { + tErr := err + for tErr != nil { + runErrP := &exec.RunError{} + runErr := &runErrP + if errors.As(tErr, runErr) { + 
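+			// kind's exec.RunError carries the failed command and its combined
+			// output; log both so e2e command failures are easier to diagnose.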
klog.Errorf("Command failed %s:\n%s", (*runErr).Command, string((*runErr).Output)) + break + } + } + + return tErr +} diff --git a/testing/e2e/kube-vip.yaml.tmpl b/testing/e2e/kube-vip.yaml.tmpl new file mode 100644 index 00000000..dba27284 --- /dev/null +++ b/testing/e2e/kube-vip.yaml.tmpl @@ -0,0 +1,49 @@ +--- +apiVersion: v1 +kind: Pod +metadata: + name: kube-vip + namespace: kube-system +spec: + containers: + - name: kube-vip + args: + - manager + - --prometheusHTTPServer + - "" + env: + - name: vip_arp + value: "true" + - name: vip_interface + value: eth0 + - name: vip_leaderelection + value: "true" + - name: address + value: "{{ .ControlPlaneVIP }}" + - name: vip_leaseduration + value: "5" + - name: vip_renewdeadline + value: "3" + - name: vip_retryperiod + value: "1" + - name: cp_enable + value: "true" + image: "{{ .ImagePath }}" + imagePullPolicy: Never + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + volumeMounts: + - mountPath: /etc/kubernetes/admin.conf + name: kubeconfig + hostAliases: + - hostnames: + - kubernetes + ip: 127.0.0.1 + hostNetwork: true + volumes: + - hostPath: + path: "{{ .ConfigPath }}" + name: kubeconfig diff --git a/testing/e2e/logger.go b/testing/e2e/logger.go new file mode 100644 index 00000000..865ecbe8 --- /dev/null +++ b/testing/e2e/logger.go @@ -0,0 +1,43 @@ +//go:build e2e +// +build e2e + +package e2e + +import ( + "k8s.io/klog/v2" + "sigs.k8s.io/kind/pkg/log" +) + +type TestLogger struct{} + +func (t TestLogger) Warnf(format string, args ...interface{}) { + klog.Warningf(format, args...) +} + +func (t TestLogger) Warn(message string) { + klog.Warning(message) +} + +func (t TestLogger) Error(message string) { + klog.Error(message) +} + +func (t TestLogger) Errorf(format string, args ...interface{}) { + klog.Errorf(format, args...) +} + +func (t TestLogger) Printf(format string, args ...interface{}) { + klog.Infof(format, args...) 
+} + +func (t TestLogger) V(level log.Level) log.InfoLogger { + return TestInfoLogger{Verbose: klog.V(klog.Level(level))} +} + +type TestInfoLogger struct { + klog.Verbose +} + +func (t TestInfoLogger) Info(message string) { + t.Verbose.Info(message) +} diff --git a/testing/e2e/services/controlplane.go b/testing/e2e/services/controlplane.go new file mode 100644 index 00000000..9dcca25d --- /dev/null +++ b/testing/e2e/services/controlplane.go @@ -0,0 +1,157 @@ +package main + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "html/template" + "io" + "net" + "os" + "os/exec" + "path/filepath" + "strings" + + log "github.com/sirupsen/logrus" +) + +func getKindNetworkSubnetCIDRs() ([]string, error) { + cmd := exec.Command( + "docker", "inspect", "kind", + "--format", `{{ range $i, $a := .IPAM.Config }}{{ println .Subnet }}{{ end }}`, + ) + cmdOut := new(bytes.Buffer) + cmd.Stdout = cmdOut + err := cmd.Run() + if err != nil { + return nil, err + } + reader := bufio.NewReader(cmdOut) + + cidrs := []string{} + for { + line, readErr := reader.ReadString('\n') + if readErr != nil && readErr != io.EOF { + return nil, fmt.Errorf("error finding subnet CIDRs in the Docker \"kind\" network, %s", err) + } + + cidrs = append(cidrs, strings.TrimSpace(line)) + if readErr == io.EOF { + break + } + } + + return cidrs, nil +} + +func generateIPv4VIP() (string, error) { + cidrs, err := getKindNetworkSubnetCIDRs() + if err != nil { + return "", err + } + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + if err != nil { + return "", parseErr + } + if ip.To4() != nil { + mask := binary.BigEndian.Uint32(ipNet.Mask) + start := binary.BigEndian.Uint32(ipNet.IP) + end := (start & mask) | (^mask) + + chosenVIP := make([]byte, 4) + binary.BigEndian.PutUint32(chosenVIP, end-5) + return net.IP(chosenVIP).String(), nil + } + } + return "", fmt.Errorf("could not find any IPv4 CIDRs in the Docker \"kind\" network") +} + +func generateIPv6VIP() (string, error) { + cidrs, err := getKindNetworkSubnetCIDRs() + if err != nil { + return "", err + } + for _, cidr := range cidrs { + ip, ipNet, parseErr := net.ParseCIDR(cidr) + if err != nil { + return "", parseErr + } + if ip.To4() == nil { + lowerMask := binary.BigEndian.Uint64(ipNet.Mask[8:]) + lowerStart := binary.BigEndian.Uint64(ipNet.IP[8:]) + lowerEnd := (lowerStart & lowerMask) | (^lowerMask) + + chosenVIP := make([]byte, 16) + // Copy upper half into chosenVIP + copy(chosenVIP, ipNet.IP[0:8]) + // Copy lower half into chosenVIP + binary.BigEndian.PutUint64(chosenVIP[8:], lowerEnd-5) + return net.IP(chosenVIP).String(), nil + } + } + return "", fmt.Errorf("could not find any IPv6 CIDRs in the Docker \"kind\" network") + +} + +func (config *testConfig) manifestGen() error { + curDir, err := os.Getwd() + if err != nil { + return err + } + templatePath := filepath.Join(curDir, "testing/e2e/kube-vip.yaml.tmpl") + + kubeVIPManifestTemplate, err := template.New("kube-vip.yaml.tmpl").ParseFiles(templatePath) + if err != nil { + return err + } + tempDirPath, err := os.MkdirTemp("", "kube-vip-test") + if err != nil { + return err + } + + var manifestFile *os.File + + if config.IPv6 { + config.Name = fmt.Sprintf("%s-ipv6", filepath.Base(tempDirPath)) + config.ManifestPath = filepath.Join(tempDirPath, "kube-vip-ipv6.yaml") + manifestFile, err = os.Create(config.ManifestPath) + if err != nil { + return err + } + defer manifestFile.Close() + + config.ControlPlaneAddress, err = generateIPv6VIP() + if err != nil { + return err + } + } else { + 
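+		// IPv4 is the default path: render an IPv4 manifest and pick an IPv4 VIP
+		// from the Docker "kind" network.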
config.Name = fmt.Sprintf("%s-ipv4", filepath.Base(tempDirPath)) + config.ManifestPath = filepath.Join(tempDirPath, "kube-vip-ipv4.yaml") + manifestFile, err = os.Create(config.ManifestPath) + if err != nil { + return err + } + defer manifestFile.Close() + + config.ControlPlaneAddress, err = generateIPv4VIP() + if err != nil { + return err + } + } + log.Infof("πŸ—ƒοΈ Manifest path %s", config.ManifestPath) + err = kubeVIPManifestTemplate.Execute(manifestFile, kubevipManifestValues{ + ControlPlaneVIP: config.ControlPlaneAddress, + ImagePath: config.ImagePath, + }) + return err +} + +// func (config *testConfig) startTest(ctx context.Context, clientset *kubernetes.Clientset) error { +// if config.ControlPlaneAddress == "" { +// log.Fatal("no control plane address exists") +// } + +// return nil +// } diff --git a/testing/e2e/services/kind-config.yaml b/testing/e2e/services/kind-config.yaml new file mode 100644 index 00000000..2eaffe06 --- /dev/null +++ b/testing/e2e/services/kind-config.yaml @@ -0,0 +1,11 @@ +# three node (two workers) cluster config +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +nodes: +- role: control-plane +- role: control-plane +- role: control-plane +- role: worker +- role: worker +- role: worker + diff --git a/testing/e2e/services/kind.go b/testing/e2e/services/kind.go new file mode 100644 index 00000000..14b7cf0f --- /dev/null +++ b/testing/e2e/services/kind.go @@ -0,0 +1,182 @@ +package main + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "time" + + log "github.com/sirupsen/logrus" + kindconfigv1alpha4 "sigs.k8s.io/kind/pkg/apis/config/v1alpha4" + "sigs.k8s.io/kind/pkg/cluster" + "sigs.k8s.io/kind/pkg/cmd" + load "sigs.k8s.io/kind/pkg/cmd/kind/load/docker-image" +) + +var provider *cluster.Provider + +type kubevipManifestValues struct { + ControlPlaneVIP string + ImagePath string +} + +type nodeAddresses struct { + node string + addresses []string +} + +func (config *testConfig) createKind() error { + + clusterConfig := kindconfigv1alpha4.Cluster{ + Networking: kindconfigv1alpha4.Networking{ + IPFamily: kindconfigv1alpha4.IPv4Family, + }, + Nodes: []kindconfigv1alpha4.Node{ + { + Role: kindconfigv1alpha4.ControlPlaneRole, + }, + }, + } + if config.IPv6 { + // Change Networking Family + clusterConfig.Networking.IPFamily = kindconfigv1alpha4.IPv6Family + } + if config.Dualstack { + // Change Networking Family + clusterConfig.Networking.IPFamily = kindconfigv1alpha4.DualStackFamily + } + + if config.ControlPlane { + err := config.manifestGen() + if err != nil { + return err + } + + // Add two additional control plane nodes (3) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole}) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.ControlPlaneRole}) + + // Add the extra static pod manifest + mount := kindconfigv1alpha4.Mount{ + HostPath: config.ManifestPath, + ContainerPath: "/etc/kubernetes/manifests/kube-vip.yaml", + } + for x := range clusterConfig.Nodes { + if clusterConfig.Nodes[x].Role == kindconfigv1alpha4.ControlPlaneRole { + clusterConfig.Nodes[x].ExtraMounts = append(clusterConfig.Nodes[x].ExtraMounts, mount) + } + } + } else { + // Add three additional worker nodes + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + clusterConfig.Nodes = append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + clusterConfig.Nodes = 
append(clusterConfig.Nodes, kindconfigv1alpha4.Node{Role: kindconfigv1alpha4.WorkerRole}) + } + + provider = cluster.NewProvider(cluster.ProviderWithLogger(cmd.NewLogger()), cluster.ProviderWithDocker()) + clusters, err := provider.List() + if err != nil { + return err + } + found := false + for x := range clusters { + if clusters[x] == "services" { + log.Infof("Cluster already exists") + found = true + } + } + if !found { + err := provider.Create("services", cluster.CreateWithV1Alpha4Config(&clusterConfig)) + if err != nil { + return err + + } + loadImageCmd := load.NewCommand(cmd.NewLogger(), cmd.StandardIOStreams()) + loadImageCmd.SetArgs([]string{"--name", "services", config.ImagePath}) + err = loadImageCmd.Execute() + if err != nil { + return err + } + nodes, err := provider.ListNodes("services") + if err != nil { + return err + } + + // HMMM, if we want to run workloads on the control planes (todo) + if config.ControlPlane { + for x := range nodes { + cmd := exec.Command("kubectl", "taint", "nodes", nodes[x].String(), "node-role.kubernetes.io/control-plane:NoSchedule-") //nolint:all + _, _ = cmd.CombinedOutput() + } + } + + globalRange := "172.18.100.10-172.18.100.30" + if config.IPv6 { + globalRange = "fd34:70db:8529:1e3d:0000:0000:0000:0010-fd34:70db:8529:1e3d:0000:0000:0000:0030" + } + + cmd := exec.Command("kubectl", "create", "configmap", "--namespace", "kube-system", "kubevip", "--from-literal", "range-global="+globalRange) + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + cmd = exec.Command("kubectl", "create", "-f", "https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml") + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + cmd = exec.Command("kubectl", "create", "-f", "https://kube-vip.io/manifests/rbac.yaml") + if _, err := cmd.CombinedOutput(); err != nil { + return err + } + log.Infof("πŸ’€ sleeping for a few seconds to let controllers start") + time.Sleep(time.Second * 5) + } + return nil +} + +func deleteKind() error { + log.Info("🧽 deleting Kind cluster") + return provider.Delete("services", "") +} + +func getAddressesOnNodes() ([]nodeAddresses, error) { + nodesConfig := []nodeAddresses{} + nodes, err := provider.ListNodes("services") + if err != nil { + return nodesConfig, err + } + for x := range nodes { + var b bytes.Buffer + + exec := nodes[x].Command("hostname", "--all-ip-addresses") + exec.SetStderr(&b) + exec.SetStdin(&b) + exec.SetStdout(&b) + err = exec.Run() + if err != nil { + return nodesConfig, err + } + nodesConfig = append(nodesConfig, nodeAddresses{ + node: nodes[x].String(), + addresses: strings.Split(b.String(), " "), + }) + } + return nodesConfig, nil +} + +func checkNodesForDuplicateAddresses(nodes []nodeAddresses, address string) error { + var foundOnNode []string + // Iterate over all nodes to find addresses, where there is an address match add to array + for x := range nodes { + for y := range nodes[x].addresses { + if nodes[x].addresses[y] == address { + foundOnNode = append(foundOnNode, nodes[x].node) + } + } + } + // If one address is on multiple nodes, then something has gone wrong + if len(foundOnNode) > 1 { + return fmt.Errorf("‼️ multiple nodes [%s] have address [%s]", strings.Join(foundOnNode, " "), address) + } + return nil +} diff --git a/testing/e2e/services/kubernetes.go b/testing/e2e/services/kubernetes.go new file mode 100644 index 00000000..d97d4835 --- /dev/null +++ b/testing/e2e/services/kubernetes.go @@ -0,0 +1,309 @@ +package main + 
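+// Helpers for the service e2e tests: deploying the kube-vip DaemonSet, creating
+// test deployments, and exposing them as LoadBalancer services while watching
+// for the assigned VIPs.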
+import ( + "context" + "fmt" + "time" + + appsv1 "k8s.io/api/apps/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + watchtools "k8s.io/client-go/tools/watch" +) + +// service defines the settings for a new service +type service struct { + name string + egress bool // enable egress + policyLocal bool // set the policy to local pods + testHTTP bool + testDualstack bool // test dualstack loadbalancer services +} + +type deployment struct { + replicas int + server bool + client bool + address string + nodeAffinity string + name string +} + +func (d *deployment) createKVDs(ctx context.Context, clientset *kubernetes.Clientset, imagepath string) error { + ds := appsv1.DaemonSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kube-vip-ds", + Namespace: "kube-system", + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Spec: appsv1.DaemonSetSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app.kubernetes.io/name": "kube-vip-ds", + }, + }, + Spec: v1.PodSpec{ + ServiceAccountName: "kube-vip", + HostNetwork: true, + Containers: []v1.Container{ + { + Args: []string{ + "manager", + }, + Env: []v1.EnvVar{ + { + Name: "vip_arp", + Value: "true", + }, + { + Name: "vip_cidr", + Value: "32", + }, + { + Name: "svc_enable", + Value: "true", + }, + { + Name: "svc_election", + Value: "true", + }, + { + Name: "EGRESS_CLEAN", + Value: "true", + }, + { + Name: "vip_loglevel", + Value: "5", + }, + { + Name: "egress_withnftables", + Value: "true", + }, + }, + Image: imagepath, + Name: "kube-vip", + SecurityContext: &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ + Add: []v1.Capability{ + "NET_ADMIN", + "NET_RAW", + }, + }, + }, + }, + }, + }, + }, + }, + } + + _, err := clientset.AppsV1().DaemonSets("kube-system").Create(ctx, &ds, metav1.CreateOptions{}) + if err != nil { + return err + } + + return nil + +} +func (d *deployment) createDeployment(ctx context.Context, clientset *kubernetes.Clientset) error { + replicas := int32(d.replicas) + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: d.name, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "kube-vip", + }, + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "kube-vip", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "kube-vip-web", + Image: "plndr/e2e:0.0.1", + Ports: []v1.ContainerPort{ + { + Name: "http", + Protocol: v1.ProtocolTCP, + ContainerPort: 80, + }, + }, + ImagePullPolicy: v1.PullAlways, + }, + }, + }, + }, + }, + } + + if d.server { + deployment.Spec.Template.Spec.Containers[0].Env = + []v1.EnvVar{ + { + Name: "E2EMODE", + Value: "SERVER", + }, + } + } + + if d.client && d.address != "" { + deployment.Spec.Template.Spec.Containers[0].Env = + []v1.EnvVar{ + { + Name: "E2EMODE", + Value: "CLIENT", + }, + { + Name: "E2EADDRESS", + Value: d.address, + }, + } + } + + if d.nodeAffinity != "" { + deployment.Spec.Template.Spec.NodeName = d.nodeAffinity + } + + result, err := clientset.AppsV1().Deployments(v1.NamespaceDefault).Create(ctx, deployment, metav1.CreateOptions{}) + if err != nil { + 
return err + } + + log.Infof("πŸ“ created deployment [%s]", result.GetObjectMeta().GetName()) + return nil +} + +func (s *service) createService(ctx context.Context, clientset *kubernetes.Clientset) (currentLeader string, loadBalancerAddresses []string, err error) { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: s.name, + Namespace: "default", + Labels: map[string]string{ + "app": "kube-vip", + }, + }, + + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Port: 80, + Protocol: v1.ProtocolTCP, + }, + }, + Selector: map[string]string{ + "app": "kube-vip", + }, + ClusterIP: "", + Type: v1.ServiceTypeLoadBalancer, + }, + } + + if s.egress { + svc.Annotations = map[string]string{ //kube-vip.io/egress: "true" + "kube-vip.io/egress": "true", + } + } + if s.policyLocal { + svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal + } + + if s.testDualstack { + if svc.Annotations == nil { + svc.Annotations = make(map[string]string) + } + ipv4VIP, err := generateIPv4VIP() + if err != nil { + log.Fatal(err) + } + ipv6VIP, err := generateIPv6VIP() + if err != nil { + log.Fatal(err) + } + svc.Annotations["kube-vip.io/loadbalancerIPs"] = fmt.Sprintf("%s,%s", ipv4VIP, ipv6VIP) + svc.Labels["implementation"] = "kube-vip" + svc.Spec.IPFamilies = []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol} + ipfPolicy := v1.IPFamilyPolicyRequireDualStack + svc.Spec.IPFamilyPolicy = &ipfPolicy + } + + log.Infof("🌍 creating service [%s]", svc.Name) + _, err = clientset.CoreV1().Services(v1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{}) + if err != nil { + log.Fatal(err) + } + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + log.Fatal(err) + } + ch := rw.ResultChan() + go func() { + time.Sleep(time.Second * 10) + rw.Stop() + }() + ready := false + + // Used for tracking an active endpoint / pod + for event := range ch { + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == s.name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + for _, ingress := range svc.Status.LoadBalancer.Ingress { + loadBalancerAddresses = append(loadBalancerAddresses, ingress.IP) + } + log.Infof("πŸ”Ž found load balancer addresses [%s] on node [%s]", loadBalancerAddresses, svc.Annotations["kube-vip.io/vipHost"]) + ready = true + currentLeader = svc.Annotations["kube-vip.io/vipHost"] + } + } + default: + + } + if ready { + break + } + } + if s.testHTTP { + for _, lbAddress := range loadBalancerAddresses { + err = httpTest(lbAddress) + if err != nil { + return "", nil, fmt.Errorf("web retrieval timeout ") + + } + } + } + return currentLeader, loadBalancerAddresses, nil +} diff --git a/testing/e2e/services/services.go b/testing/e2e/services/services.go new file mode 100644 index 00000000..33b896a0 --- /dev/null +++ b/testing/e2e/services/services.go @@ -0,0 +1,608 @@ +//nolint:govet +package main + +import ( + "context" + "errors" + "fmt" + "net" + "net/http" + "os" + "time" + + 
"k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/cache" + + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + watchtools "k8s.io/client-go/tools/watch" +) + +// Methodology + +// 1. Create a deployment +// 2. Expose the deployment +func (config *testConfig) startServiceTest(ctx context.Context, clientset *kubernetes.Clientset) { + nodeTolerate := os.Getenv("NODE_TOLERATE") + + d := "kube-vip-deploy" + s := "kube-vip-service" + l := "kube-vip-deploy-leader" + + if !config.ignoreSimple { + // Simple Deployment test + log.Infof("πŸ§ͺ ---> simple deployment <---") + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + testHTTP: true, + } + _, _, err = svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreDeployments { + // Multiple deployment tests + log.Infof("πŸ§ͺ ---> multiple deployments <---") + deploy := deployment{ + name: l, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + if err != nil { + log.Fatal(err) + } + for i := 1; i < 5; i++ { + svc := service{ + name: fmt.Sprintf("%s-%d", s, i), + testHTTP: true, + } + _, _, err = svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + config.successCounter++ + } + for i := 1; i < 5; i++ { + log.Infof("🧹 deleting service [%s]", fmt.Sprintf("%s-%d", s, i)) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, fmt.Sprintf("%s-%d", s, i), metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + log.Infof("🧹 deleting deployment [%s]", d) + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, l, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreLeaderFailover { + // Failover tests + log.Infof("πŸ§ͺ ---> leader failover deployment (local policy) <---") + + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + egress: false, + policyLocal: true, + testHTTP: true, + } + leader, lbAddresses, err := svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } + lbAddress := lbAddresses[0] + + err = leaderFailover(ctx, &s, &leader, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + // Get all addresses on all nodes + nodes, err := getAddressesOnNodes() + if err != nil { + log.Error(err) + } + // Make sure we don't exist in two places + err = checkNodesForDuplicateAddresses(nodes, lbAddress) + if err != nil { + log.Fatal(err) + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + + err = 
clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + + if !config.ignoreLeaderActive { + // pod Failover tests + log.Infof("πŸ§ͺ ---> active pod failover deployment (local policy) <---") + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 1, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + policyLocal: true, + testHTTP: true, + } + leader, _, err := svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } + + err = podFailover(ctx, &s, &leader, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + if !config.ignoreLocalDeploy { + // Multiple deployment tests + log.Infof("πŸ§ͺ ---> multiple deployments (local policy) <---") + deploy := deployment{ + name: l, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + for i := 1; i < 5; i++ { + svc := service{ + policyLocal: true, + name: fmt.Sprintf("%s-%d", s, i), + testHTTP: true, + } + _, lbAddresses, err := svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + lbAddress := lbAddresses[0] + + config.successCounter++ + nodes, err := getAddressesOnNodes() + if err != nil { + log.Error(err) + } + err = checkNodesForDuplicateAddresses(nodes, lbAddress) + if err != nil { + log.Fatal(err) + } + } + for i := 1; i < 5; i++ { + log.Infof("🧹 deleting service [%s]", fmt.Sprintf("%s-%d", s, i)) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, fmt.Sprintf("%s-%d", s, i), metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + log.Infof("🧹 deleting deployment [%s]", d) + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, l, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + + if !config.ignoreEgress { + // pod Failover tests + log.Infof("πŸ§ͺ ---> egress IP re-write (local policy) <---") + var egress string + var found bool + // Set up a local listener + go func() { + found = tcpServer(&egress) + }() + + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 1, + client: true, + } + + // Find this machines IP address + deploy.address = GetLocalIP() + if deploy.address == "" { + log.Fatalf("Unable to detect local IP address") + } + log.Infof("πŸ“  found local address [%s]", deploy.address) + // Create a deployment that connects back to this machines IP address + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + + svc := service{ + policyLocal: true, + name: s, + egress: true, + testHTTP: false, + } + + _, lbAddresses, err := svc.createService(ctx, clientset) + if err != nil { + log.Fatal(err) + } + egress = lbAddresses[0] + + for i := 1; i < 5; i++ { + if found { + log.Infof("πŸ•΅οΈ egress has correct IP address") + config.successCounter++ + break + } + time.Sleep(time.Second * 1) + } + + if !found { + log.Error("😱 No traffic found from loadbalancer address ") + } + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = 
clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + } + + if !config.ignoreDualStack { + // Dualstack loadbalancer test + log.Infof("πŸ§ͺ ---> testing dualstack loadbalancer service <---") + deploy := deployment{ + name: d, + nodeAffinity: nodeTolerate, + replicas: 2, + server: true, + } + err := deploy.createDeployment(ctx, clientset) + if err != nil { + log.Fatal(err) + } + svc := service{ + name: s, + testHTTP: true, + testDualstack: true, + } + _, _, err = svc.createService(ctx, clientset) + if err != nil { + log.Error(err) + } else { + config.successCounter++ + } + + log.Infof("🧹 deleting Service [%s], deployment [%s]", s, d) + err = clientset.CoreV1().Services(v1.NamespaceDefault).Delete(ctx, s, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + err = clientset.AppsV1().Deployments(v1.NamespaceDefault).Delete(ctx, d, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + + } + + log.Infof("πŸ† Testing Complete [%d] passed", config.successCounter) +} + +func httpTest(address string) error { + log.Infof("πŸ•·οΈ testing HTTP request against [%s]", address) + Client := http.Client{ + Timeout: 2 * time.Second, + } + ip := net.ParseIP(address) + if ip == nil { + return errors.New("invalid address") + } + if ip.To4() == nil { + // use brackets for IPv6 address + address = fmt.Sprintf("[%s]", address) + } + var err error + for i := 0; i < 5; i++ { + var r *http.Response + //nolint + r, err = Client.Get(fmt.Sprintf("http://%s", address)) //nolint + + if err == nil { + log.Infof("πŸ•ΈοΈ successfully retrieved web data in [%ds]", i) + r.Body.Close() + + return nil + } + time.Sleep(time.Second * 2) + } + return err +} + +func leaderFailover(ctx context.Context, name, leaderNode *string, clientset *kubernetes.Clientset) error { + go func() { + log.Infof("πŸ’€ killing leader five times") + for i := 0; i < 5; i++ { + p, err := clientset.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{}) + if err != nil { + log.Fatal(err) + } + + for x := range p.Items { + if p.Items[x].Spec.NodeName == *leaderNode { + if p.Items[x].Spec.Containers[0].Name == "kube-vip" { + err = clientset.CoreV1().Pods("kube-system").Delete(ctx, p.Items[x].Name, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + log.Infof("πŸ”ͺ leader pod [%s] has been deleted", p.Items[x].Name) + } + } + } + time.Sleep(time.Second * 5) + } + }() + + log.Infof("πŸ‘€ service [%s] for updates", *name) + + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + return err + } + ch := rw.ResultChan() + + go func() { + time.Sleep(time.Second * 30) + rw.Stop() + }() + + // Used for tracking an active endpoint / pod + for event := range ch { + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if 
len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ”Ž found load balancer address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + } + } + case watch.Modified: + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ” updated with address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + err = httpTest(svc.Status.LoadBalancer.Ingress[0].IP) + if err != nil { + return err + } + *leaderNode = svc.Annotations["kube-vip.io/vipHost"] + } + } + default: + + } + } + return nil +} + +func podFailover(ctx context.Context, name, leaderNode *string, clientset *kubernetes.Clientset) error { + go func() { + log.Infof("πŸ’€ killing active pod five times") + for i := 0; i < 5; i++ { + p, err := clientset.CoreV1().Pods(v1.NamespaceDefault).List(ctx, metav1.ListOptions{}) + if err != nil { + log.Fatal(err) + } + found := false + for x := range p.Items { + if p.Items[x].Spec.NodeName == *leaderNode { + if p.Items[x].Spec.Containers[0].Name == "kube-vip-web" { + found = true + err = clientset.CoreV1().Pods(v1.NamespaceDefault).Delete(ctx, p.Items[x].Name, metav1.DeleteOptions{}) + if err != nil { + log.Fatal(err) + } + log.Infof("πŸ”ͺ active pod [%s] on [%s] has been deleted", p.Items[x].Name, p.Items[x].Spec.NodeName) + } + } + } + if !found { + log.Warnf("😱 No Pod found on [%s]", *leaderNode) + } + time.Sleep(time.Second * 5) + } + }() + + log.Infof("πŸ‘€ service [%s] for updates", *name) + + // Use a restartable watcher, as this should help in the event of etcd or timeout issues + rw, err := watchtools.NewRetryWatcher("1", &cache.ListWatch{ + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + return clientset.CoreV1().Services(v1.NamespaceDefault).Watch(ctx, metav1.ListOptions{}) + }, + }) + if err != nil { + return err + } + ch := rw.ResultChan() + + go func() { + time.Sleep(time.Second * 30) + rw.Stop() + }() + + // Used for tracking an active endpoint / pod + for event := range ch { + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added: + // log.Debugf("Endpoints for service [%s] have been Created or modified", s.service.ServiceName) + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ”Ž found load balancer address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + } + } + case watch.Modified: + svc, ok := event.Object.(*v1.Service) + if !ok { + log.Fatalf("unable to parse Kubernetes services from API watcher") + } + if svc.Name == *name { + if len(svc.Status.LoadBalancer.Ingress) != 0 { + log.Infof("πŸ” updated with address [%s] on node [%s]", svc.Status.LoadBalancer.Ingress[0].IP, svc.Annotations["kube-vip.io/vipHost"]) + err = httpTest(svc.Status.LoadBalancer.Ingress[0].IP) + if err != nil { + log.Fatal(err) + } + *leaderNode = svc.Annotations["kube-vip.io/vipHost"] + } + } + default: + + } + } + return nil +} + +func tcpServer(egressAddress *string) bool { + listen, err := net.Listen("tcp", ":12345") //nolint + if err != nil { + log.Error(err) + } + // close listener + go func() { + time.Sleep(time.Second * 10) + listen.Close() + }() + for { + conn, 
err := listen.Accept() + if err != nil { + return false + // log.Fatal(err) + } + remoteAddress, _, _ := net.SplitHostPort(conn.RemoteAddr().String()) + if remoteAddress == *egressAddress { + log.Infof("πŸ“ž πŸ‘ incoming from egress Address [%s]", remoteAddress) + return true + } + log.Infof("πŸ“ž πŸ‘Ž incoming from pod address [%s]", remoteAddress) + go handleRequest(conn) + } +} + +func handleRequest(conn net.Conn) { + // incoming request + buffer := make([]byte, 1024) + _, err := conn.Read(buffer) + if err != nil { + log.Error(err) + } + // write data to response + time := time.Now().Format(time.ANSIC) + responseStr := fmt.Sprintf("Your message is: %v. Received time: %v", string(buffer[:]), time) + _, err = conn.Write([]byte(responseStr)) + if err != nil { + log.Error(err) + } + // close conn + conn.Close() +} + +func GetLocalIP() string { + addrs, err := net.InterfaceAddrs() + if err != nil { + return "" + } + for _, address := range addrs { + // check the address type and if it is not a loopback the display it + if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { + if ipnet.IP.To4() != nil { + return ipnet.IP.String() + } + } + } + return "" +} diff --git a/testing/e2e/services/tests.go b/testing/e2e/services/tests.go new file mode 100644 index 00000000..96f207a9 --- /dev/null +++ b/testing/e2e/services/tests.go @@ -0,0 +1,131 @@ +package main + +import ( + "context" + "flag" + "os" + "path/filepath" + + "github.com/kube-vip/kube-vip/pkg/k8s" + log "github.com/sirupsen/logrus" +) + +type testConfig struct { + successCounter int + + ImagePath string + + ControlPlane bool + // control plane settings + Name string + ControlPlaneAddress string + ManifestPath string + IPv6 bool + Dualstack bool + + Services bool + // service tests + ignoreSimple bool + ignoreDeployments bool + ignoreLeaderFailover bool + ignoreLeaderActive bool + ignoreLocalDeploy bool + ignoreDualStack bool + ignoreEgress bool + retainCluster bool +} + +func main() { + var t testConfig + + t.ImagePath = os.Getenv("E2E_IMAGE_PATH") + + _, t.ignoreSimple = os.LookupEnv("IGNORE_SIMPLE") + _, t.ignoreDeployments = os.LookupEnv("IGNORE_DEPLOY") + _, t.ignoreLeaderFailover = os.LookupEnv("IGNORE_LEADER") + _, t.ignoreLeaderActive = os.LookupEnv("IGNORE_ACTIVE") + _, t.ignoreLocalDeploy = os.LookupEnv("IGNORE_LOCALDEPLOY") + _, t.ignoreEgress = os.LookupEnv("IGNORE_EGRESS") + _, t.ignoreDualStack = os.LookupEnv("IGNORE_DUALSTACK") + _, t.retainCluster = os.LookupEnv("RETAIN_CLUSTER") + _, t.IPv6 = os.LookupEnv("IPV6_FAMILY") + + flag.StringVar(&t.ImagePath, "imagepath", "plndr/kube-vip:action", "") + flag.BoolVar(&t.ControlPlane, "ControlPlane", false, "") + flag.BoolVar(&t.Services, "Services", false, "") + + flag.Parse() + + log.Infof("πŸ”¬ beginning e2e tests, image: [%s]", t.ImagePath) + + if !t.ignoreDualStack { + t.Dualstack = true + } + + if t.ControlPlane { + err := t.createKind() + if !t.retainCluster { + if err != nil { + log.Fatal(err) + } + defer func() { + err := deleteKind() + if err != nil { + log.Fatal(err) + } + }() + } else { + if err != nil { + log.Warn(err) + } + } + // ctx, cancel := context.WithCancel(context.TODO()) + // defer cancel() + // homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") + // clientset, err := k8s.NewClientset(homeConfigPath, false, "") + // if err != nil { + // log.Fatalf("could not create k8s clientset from external file: %q: %v", homeConfigPath, err) + // } + // log.Debugf("Using external Kubernetes configuration from file [%s]", homeConfigPath) + 
// err = t.startTest(ctx, clientset) + // if err != nil { + // log.Fatal(err) + // } + } + + if t.Services { + err := t.createKind() + if !t.retainCluster { + if err != nil { + log.Fatal(err) + } + defer func() { + err := deleteKind() + if err != nil { + log.Fatal(err) + } + }() + } else { + if err != nil { + log.Warn(err) + } + } + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + homeConfigPath := filepath.Join(os.Getenv("HOME"), ".kube", "config") + clientset, err := k8s.NewClientset(homeConfigPath, false, "") + if err != nil { + log.Fatalf("could not create k8s clientset from external file: %q: %v", homeConfigPath, err) + } + log.Debugf("Using external Kubernetes configuration from file [%s]", homeConfigPath) + + // Deplopy the daemonset for kube-vip + deploy := deployment{} + err = deploy.createKVDs(ctx, clientset, t.ImagePath) + if err != nil { + log.Error(err) + } + t.startServiceTest(ctx, clientset) + } + +} diff --git a/testing/e2e/template.go b/testing/e2e/template.go new file mode 100644 index 00000000..4b9abe92 --- /dev/null +++ b/testing/e2e/template.go @@ -0,0 +1,10 @@ +//go:build e2e +// +build e2e + +package e2e + +type KubevipManifestValues struct { + ControlPlaneVIP string + ImagePath string + ConfigPath string +} diff --git a/testing/k3s/create.sh b/testing/k3s/create.sh new file mode 100755 index 00000000..b9ebf0c8 --- /dev/null +++ b/testing/k3s/create.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + +## Main() + +# Ensure we have an entirely new logfile +reset_logfile + +logr "INFO" "Starting kube-vip.io testing with k3s" +logr "DEFAULT" "Creating Logfile $logfile" + +if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then + echo "Usage:" + echo " Param 1: Kube-Vip Version" + echo " Param 2: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]" + echo " Param 3: Vip address" + echo " Param 4: k3s url (https://github.com/k3s-io/k3s/releases/download/v1.20.4%2Bk3s1/k3s)" + echo "" ./create.sh 0.3.3 hybrid 192.168.0.40 + exit 1 +fi + +# Sane variable renaming +kubernetes_version=$4 +kube_vip_version=$1 +kube_vip_vip=$3 + +case "$2" in +"controlplane") logr "INFO" "Creating in control plane only mode" + kube_vip_mode="--controlplane" + ;; +"services") logr "INFO" "Creating in services-only mode" + kube_vip_mode="--services" + ;; +"hybrid") logr "INFO" "Creating in hybrid mode" + kube_vip_mode="--controlplane --services" + ;; +*) echo "Unknown kube-vip mode [$3]" + exit -1 + ;; +esac + +ssh $NODE01 "sudo mkdir -p /var/lib/rancher/k3s/server/manifests/" +ssh $NODE01 "sudo docker run --network host --rm plndr/kube-vip:$kube_vip_version manifest daemonset $kube_vip_mode --interface ens160 --vip $kube_vip_vip --arp --leaderElection --inCluster --taint | sudo tee /var/lib/rancher/k3s/server/manifests/vip.yaml" +ssh $NODE01 "sudo curl https://kube-vip.io/manifests/rbac.yaml | sudo tee /var/lib/rancher/k3s/server/manifests/rbac.yaml" +ssh $NODE01 "sudo screen -dmSL k3s k3s server --cluster-init --tls-san $kube_vip_vip --no-deploy servicelb --disable-cloud-controller --token=test" +echo "Started first node, sleeping for 60 seconds" +sleep 60 +echo "Adding additional nodes" +ssh $NODE02 "sudo screen -dmSL k3s k3s server --server https://$kube_vip_vip:6443 --token=test" +ssh $NODE03 "sudo screen -dmSL k3s k3s server --server https://$kube_vip_vip:6443 --token=test" +sleep 20 +ssh $NODE01 "sudo k3s kubectl get node -o wide" diff --git a/testing/k3s/teardown.sh 
b/testing/k3s/teardown.sh new file mode 100755 index 00000000..070260b5 --- /dev/null +++ b/testing/k3s/teardown.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +source ./testing/nodes + +echo "Wiping Nodes in reverse order, and rebooting" +ssh $NODE05 "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot" +ssh $NODE04 "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot" +ssh $NODE03 "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot" +ssh $NODE02 "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot" +ssh $NODE01 "sudo pkill k3s; sudo rm -rf /var/lib/rancher /etc/rancher; sudo reboot" +echo +echo "All Control Plane Nodes have been reset" +echo "Consider removing kube-vip images if changing version" \ No newline at end of file diff --git a/testing/kubeadm/create.sh b/testing/kubeadm/create.sh new file mode 100755 index 00000000..6e346386 --- /dev/null +++ b/testing/kubeadm/create.sh @@ -0,0 +1,114 @@ +#!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + +install_deps() { + echo "Installing Kubernetes dependencies for Kubernetes $kubernetes_version on all nodes" + ssh $NODE01 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE02 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE03 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE04 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE05 "curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" +} + +first_node() { + logr "INFO" "Creating First node!" 
+ #ssh $NODE01 "sudo modprobe ip_vs_rr" + #ssh $NODE01 "sudo modprobe nf_conntrack" + logr "INFO" "$(ssh $NODE01 "docker rmi ghcr.io/kube-vip/kube-vip:$kube_vip_version" 2>&1)" + + # echo "echo "ip_vs | tee -a /etc/modules" + logr "INFO" "Creating Kube-vip.io Manifest" + ssh $NODE01 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\" | sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Deploying first Kubernetes node $NODE01" + FIRST_NODE=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16") + echo "$FIRST_NODE" >> $logfile + CONTROLPLANE_CMD=$(echo "$FIRST_NODE" | grep -m1 certificate-key) + #CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16 | grep -m1 certificate-key") + ssh $NODE01 "sudo rm -rf ~/.kube/" + ssh $NODE01 "mkdir -p .kube" + ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config" + ssh $NODE01 "sudo chown dan:dan .kube/config" + logr "INFO" "Enabling strict ARP on kube-proxy" + ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system" + ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP" + logr "INFO" "Deploying Calico to the Kubernetes Cluster" + ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml" >> $logfile + logr "INFO" "Retrieving Join command" + JOIN_CMD=$(ssh $NODE01 " sudo kubeadm token create --print-join-command 2> /dev/null") +} + + +additional_controlplane() { + logr "INFO" "Adding $NODE02" + ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE02 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Adding $NODE03" + ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE03 "sudo docker run --network host --rm ghcr.io/kube-vip/kube-vip:$kube_vip_version manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile +} + +## Main() + +# Ensure we have an entirely new logfile +reset_logfile + +logr "INFO" "Starting kube-vip.io testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" + +if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then + echo "Usage:" + echo " Param 1: Kubernetes Version" + echo " Param 2: Kube-Vip Version" + echo " Param 3: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]" + echo " Param 4: Vip address" + echo "" + echo "" ./create.sh 1.18.5 0.4.0 hybrid 192.168.0.40 + exit 1 +fi + +# Sane variable renaming +kubernetes_version=$1 +kube_vip_version=$2 +kube_vip_vip=$4 + +case "$3" in +"controlplane") logr "INFO" "Creating in control plane only mode" + kube_vip_mode="--controlplane" + ;; +"services") logr "INFO" "Creating in 
services-only mode" + kube_vip_mode="--services" + ;; +"hybrid") logr "INFO" "Creating in hybrid mode" + kube_vip_mode="--controlplane --services" + ;; +*) echo "Unknown kube-vip mode [$3]" + exit -1 + ;; +esac + +if [[ -z "$DEPS" ]]; then + logr "INFO" "Installing specific version of Kubernetes Dependencies" + install_deps +fi + +first_node +additional_controlplane +logr "INFO" "Adding $NODE04" +ssh $NODE04 "sudo $JOIN_CMD" >> $logfile +logr "INFO" "Adding $NODE05" +ssh $NODE05 "sudo $JOIN_CMD" >> $logfile +logr "DEFAULT" "Nodes should be deployed at this point, waiting 5 secs and querying the deployment" +echo +sleep 5 +ssh $NODE01 "kubectl get nodes" | tee >> $logfile +ssh $NODE01 "kubectl get pods -A" | tee >> $logfile +echo +logr "INFO" "Kubernetes: $kubernetes_version, Kube-vip $kube_vip_version, Advertising VIP: $kube_vip_vip" diff --git a/testing/kubeadm/create_ctr.sh b/testing/kubeadm/create_ctr.sh new file mode 100755 index 00000000..b366321a --- /dev/null +++ b/testing/kubeadm/create_ctr.sh @@ -0,0 +1,116 @@ +#!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + +install_deps() { + echo "Installing Kubernetes dependencies for Kubernetes $kubernetes_version on all nodes" + ssh $NODE01 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE02 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE03 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE04 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" + ssh $NODE05 "sudo rm /etc/apt/sources.list.d/* && curl -4 -s -L https://dl.k8s.io/apt/doc/apt-key.gpg | sudo apt-key add && sudo apt-add-repository \"deb http://apt.kubernetes.io/ kubernetes-xenial main\" && sudo apt-get update -q && sudo apt-get install -qy --allow-downgrades kubelet=$kubernetes_version-00 kubectl=$kubernetes_version-00 kubeadm=$kubernetes_version-00" +} + +first_node() { + logr "INFO" "Creating First node!" 
+ #ssh $NODE01 "sudo modprobe ip_vs_rr" + #ssh $NODE01 "sudo modprobe nf_conntrack" + logr "INFO" "$(ssh $NODE01 "ctr images rm ghcr.io/kube-vip/kube-vip:$kube_vip_version" 2>&1)" + + # echo "echo "ip_vs | tee -a /etc/modules" + logr "INFO" "Creating Kube-vip.io Manifest" + + ssh $NODE01 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version" + ssh $NODE01 "sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\" | sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Deploying first Kubernetes node $NODE01" + FIRST_NODE=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16") + echo "$FIRST_NODE" >> $logfile + CONTROLPLANE_CMD=$(echo "$FIRST_NODE" | grep -m1 certificate-key) + #CONTROLPLANE_CMD=$(ssh $NODE01 "sudo kubeadm init --kubernetes-version $kubernetes_version --control-plane-endpoint $kube_vip_vip --upload-certs --pod-network-cidr=10.0.0.0/16 | grep -m1 certificate-key") + ssh $NODE01 "sudo rm -rf ~/.kube/" + ssh $NODE01 "mkdir -p .kube" + ssh $NODE01 "sudo cp -i /etc/kubernetes/admin.conf .kube/config" + ssh $NODE01 "sudo chown dan:dan .kube/config" + logr "INFO" "Enabling strict ARP on kube-proxy" + ssh $NODE01 "kubectl get configmap kube-proxy -n kube-system -o yaml | sed -e \"s/strictARP: false/strictARP: true/\" | kubectl apply -f - -n kube-system" + ssh $NODE01 "kubectl describe configmap -n kube-system kube-proxy | grep strictARP" + logr "INFO" "Deploying Calico to the Kubernetes Cluster" + ssh $NODE01 "kubectl apply -f https://docs.projectcalico.org/manifests/calico.yaml" >> $logfile + logr "INFO" "Retrieving Join command" + JOIN_CMD=$(ssh $NODE01 "kubeadm token create --print-join-command 2> /dev/null") +} + + +additional_controlplane() { + logr "INFO" "Adding $NODE02" + ssh $NODE02 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE02 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version; sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile + logr "INFO" "Adding $NODE03" + ssh $NODE03 "sudo $JOIN_CMD $CONTROLPLANE_CMD" >> $logfile + sleep 1 + ssh $NODE03 "sudo ctr image pull ghcr.io/kube-vip/kube-vip:$kube_vip_version; sudo mkdir -p /etc/kubernetes/manifests/; sudo ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:$kube_vip_version vip /kube-vip manifest pod --interface ens160 --vip $kube_vip_vip --arp --leaderElection --enableLoadBalancer $kube_vip_mode | sed \"s/image:.*/image: plndr\/kube-vip:$kube_vip_version/\"| sudo tee /etc/kubernetes/manifests/vip.yaml" >> $logfile +} + +## Main() + +# Ensure we have an entirely new logfile +reset_logfile + +logr "INFO" "Starting kube-vip.io testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" + +if [[ -z $1 && -z $2 && -z $3 && -z $4 ]]; then + echo "Usage:" + echo " Param 1: Kubernetes Version" + echo " Param 2: Kube-Vip Version" + echo " Param 3: Kube-Vip mode [\"controlplane\"/\"services\"/\"hybrid\"]" + echo " 
Param 4: Vip address" + echo "" + echo "" ./create.sh 1.18.5 0.4.0 hybrid 192.168.0.40 + exit 1 +fi + +# Sane variable renaming +kubernetes_version=$1 +kube_vip_version=$2 +kube_vip_vip=$4 + +case "$3" in +"controlplane") logr "INFO" "Creating in control plane only mode" + kube_vip_mode="--controlplane" + ;; +"services") logr "INFO" "Creating in services-only mode" + kube_vip_mode="--services" + ;; +"hybrid") logr "INFO" "Creating in hybrid mode" + kube_vip_mode="--controlplane --services" + ;; +*) echo "Unknown kube-vip mode [$3]" + exit -1 + ;; +esac + +if [[ -z "$DEPS" ]]; then + logr "INFO" "Installing specific version of Kubernetes Dependencies" + install_deps +fi + +first_node +additional_controlplane +logr "INFO" "Adding $NODE04" +ssh $NODE04 "sudo $JOIN_CMD" >> $logfile +logr "INFO" "Adding $NODE05" +ssh $NODE05 "sudo $JOIN_CMD" >> $logfile +logr "DEFAULT" "Nodes should be deployed at this point, waiting 5 secs and querying the deployment" +echo +sleep 5 +ssh $NODE01 "kubectl get nodes" | tee >> $logfile +ssh $NODE01 "kubectl get pods -A" | tee >> $logfile +echo +logr "INFO" "Kubernetes: $kubernetes_version, Kube-vip $kube_vip_version, Advertising VIP: $kube_vip_vip" diff --git a/testing/kubeadm/service.sh b/testing/kubeadm/service.sh new file mode 100755 index 00000000..8ac810a0 --- /dev/null +++ b/testing/kubeadm/service.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -e + +# Read node configuration +source ./testing/nodes + +# Read logging function +source ./testing/logging.bash + + +logr "INFO" "Starting kube-vip.io service testing with Kubeadm" +logr "DEFAULT" "Creating Logfile $logfile" + +# Adding Controller +logr "INFO" "Creating network range configmap" +ssh $NODE01 "kubectl create configmap -n kube-system kubevip --from-literal range-global=192.168.0.220-192.168.0.222" >> $logfile + +logr "INFO" "Deploying kube-vip.io Controller" +ssh $NODE01 "kubectl apply -f https://raw.githubusercontent.com/kube-vip/kube-vip-cloud-provider/main/manifest/kube-vip-cloud-controller.yaml" >> $logfile + +logr "INFO" "Creating \"nginx\" deployment" +ssh $NODE01 "kubectl apply -f https://k8s.io/examples/application/deployment.yaml" >> $logfile +sleep 5 + +logr "DEFAULT" "Creating \"nginx\" service" +ssh $NODE01 "kubectl expose deployment nginx-deployment --port=80 --type=LoadBalancer --name=nginx" >> $logfile + +logr "INFO" "Sleeping for 20 seconds to give the controller time to \"reconcile\"" +sleep 20 + +logr "INFO" "Retrieving logs from kube-vip.io cloud provider" +ssh $NODE01 "kubectl logs -n kube-system kube-vip-cloud-provider-0" >> $logfile +logr "INFO" "Retrieving service configuration" +ssh $NODE01 "kubectl describe svc nginx" | tee >> $logfile diff --git a/testing/kubeadm/teardown.sh b/testing/kubeadm/teardown.sh new file mode 100755 index 00000000..4532b5df --- /dev/null +++ b/testing/kubeadm/teardown.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +source ./testing/nodes + +echo "Wiping Nodes in reverse order, and rebooting" +ssh $NODE05 "sudo kubeadm reset -f; sudo rm -rf /etc/cni/net.d; sudo reboot" +ssh $NODE04 "sudo kubeadm reset -f; sudo rm -rf /etc/cni/net.d; sudo reboot" +ssh $NODE03 "sudo kubeadm reset -f; sudo rm -rf /etc/cni/net.d; sudo reboot" +ssh $NODE02 "sudo kubeadm reset -f; sudo rm -rf /etc/cni/net.d; sudo reboot" +ssh $NODE01 "sudo kubeadm reset -f --skip-phases preflight update-cluster-status remove-etcd-member; sudo rm -rf /etc/cni/net.d; sudo reboot" +echo +echo "All Control Plane Nodes have been reset" +echo "Consider removing kube-vip images if changing version" \ No newline at end 
of file diff --git a/testing/lb_clean.sh b/testing/lb_clean.sh new file mode 100644 index 00000000..43bbcfa6 --- /dev/null +++ b/testing/lb_clean.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +if [[ -z $1 ]]; then + echo "Usage:" + echo " Param 1: kube-vip Version" + exit 1 +fi + +echo "Removing docker images from workers" +sleep 2 +ssh k8s05 "sudo docker rmi plndr/kube-vip:$1" +ssh k8s04 "sudo docker rmi plndr/kube-vip:$1" +ssh k8s03 "sudo docker rmi plndr/kube-vip:$1" +ssh k8s02 "sudo docker rmi plndr/kube-vip:$1" +ssh k8s01 "sudo docker rmi plndr/kube-vip:$1" diff --git a/testing/logging.bash b/testing/logging.bash new file mode 100644 index 00000000..fb81b6a4 --- /dev/null +++ b/testing/logging.bash @@ -0,0 +1,46 @@ +LOG_ON_FILE=true + +logfile="/tmp/kube-vip-testing.$(date +'%Y-%m-%d').log" + + +echo_timing() { + #-------------------------------------------------------- + # Out: [19/01/2020 18h19:56] Hello + #-------------------------------------------------------- + echo [`date +%d"/"%m"/"%Y" "%H"h"%M":"%S`] $@ +} + +echo_color(){ + COLOR=$1; MSG=$2; + + if [[ ${COLOR} == *"WHITE"* ]]; then echo -e "\\e[39m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"RED"* ]]; then echo -e "\\e[31m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"GREEN"* ]]; then echo -e "\\e[32m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"YELLOW"* ]]; then echo -e "\\e[33m"${MSG}"\\e[0m"; + elif [[ ${COLOR} == *"BLUE"* ]]; then echo -e "\\e[34m"${MSG}"\\e[0m"; + fi; +} + +echo_console(){ + TYPE_OF_MSG=$1; MSG=$2; + + if [[ ${TYPE_OF_MSG} == *"1"* ]] || [[ ${TYPE_OF_MSG} == *"SUCCESS"* ]]; then echo_timing "$(echo_color "GREEN" "[+]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"2"* ]] || [[ ${TYPE_OF_MSG} == *"FAIL"* ]]; then echo_timing "$(echo_color "RED" "[-]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"3"* ]] || [[ ${TYPE_OF_MSG} == *"WARNING"* ]]; then echo_timing "$(echo_color "YELLOW" "[!]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"4"* ]] || [[ ${TYPE_OF_MSG} == *"INFO"* ]]; then echo_timing "$(echo_color "BLUE" "[i]: ${MSG}")"; + elif [[ ${TYPE_OF_MSG} == *"0"* ]] || [[ ${TYPE_OF_MSG} == *"DEFAULT"* ]]; then echo_timing "$(echo_color "WHITE" "[:]: ${MSG}")"; + else MSG=${TYPE_OF_MSG}; echo_timing "$(echo_color "WHITE" "[:]: ${MSG}")"; + fi; +} + +logr(){ + TYPE_OF_MSG=$1; MSG=$2; + + if [[ ${LOG_ON_FILE} ]]; then echo_console "${TYPE_OF_MSG}" "${MSG}" | tee -a "${logfile}"; + else echo_console "${TYPE_OF_MSG}" "${MSG}"; fi; +} + +reset_logfile() { + touch $logfile + cat /dev/null > $logfile +} \ No newline at end of file diff --git a/testing/nodes b/testing/nodes new file mode 100644 index 00000000..3ecc09ec --- /dev/null +++ b/testing/nodes @@ -0,0 +1,11 @@ +# Home lab (for testing) +#NODE01=k8s01.fnnrn.me +#NODE02=k8s02.fnnrn.me +#NODE03=k8s03.fnnrn.me +#NODE04=k8s04.fnnrn.me +#NODE05=k8s05.fnnrn.me +NODE01=192.168.0.191 +NODE02=192.168.0.192 +NODE03=192.168.0.193 +NODE04=192.168.0.194 +NODE05=192.168.0.195 diff --git a/testing/testing.sh b/testing/testing.sh new file mode 100755 index 00000000..95885670 --- /dev/null +++ b/testing/testing.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +abort() +{ + echo >&2 ' +*************** +*** ABORTED *** +*************** +' + echo "An error occurred. Exiting..." 
>&2 + exit 1 + } + + trap 'abort' 0 + + set -e + set -o pipefail + + echo "==> ARP w/services & controlplane" + docker run --network host --rm plndr/kube-vip:action manifest pod --interface eth0 --vip 192.168.0.1 --controlplane --arp --services --leaderElection + + echo "==> BGP w/controlplane" + docker run --network host --rm plndr/kube-vip:action manifest pod --interface eth0 --vip 192.168.0.1 --controlplane --bgp + + echo "==> BGP w/controlplane and specified peers" + docker run --network host --rm plndr/kube-vip:action manifest pod --interface eth0 --vip 192.168.0.1 --controlplane --bgp --bgppeers 192.168.0.2:12345::true,192.168.0.3:12345::true + + echo "==> ARP w/controlplane (leader election, no services)" + docker run --network host --rm plndr/kube-vip:action manifest pod --interface eth0 --vip 192.168.0.1 --controlplane --arp --leaderElection + + echo "==> ARP w/controlplane (using --address)" + docker run --network host --rm plndr/kube-vip:action manifest pod --interface enx001e063262b1 --address k8s-api-vip.lan --arp --leaderElection --controlplane + + echo "==> ARP daemonset w/controlplane & services (in-cluster, tainted)" + docker run --network host --rm plndr/kube-vip:action manifest daemonset --interface eth0 --vip 192.168.0.1 --controlplane \ + --services \ + --inCluster \ + --taint + + trap : 0 + + echo >&2 ' + ************ + *** DONE *** + ************ + '
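For reference, the listener in testing/e2e hands any connection that does not arrive from the expected egress address to handleRequest, which echoes the payload back as "Your message is: ... Received time: ...". A minimal, hypothetical client for that echo path could look like the sketch below; the target address and port are placeholders for illustration, not values taken from the test harness.

package main

import (
	"fmt"
	"log"
	"net"
	"time"
)

func main() {
	// Placeholder endpoint; the real e2e harness decides where the listener runs.
	target := "192.168.0.220:12345"

	conn, err := net.DialTimeout("tcp", target, 5*time.Second)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Send a message and read back the "Your message is: ..." reply
	// produced by handleRequest.
	if _, err := conn.Write([]byte("hello from the egress client")); err != nil {
		log.Fatal(err)
	}

	reply := make([]byte, 1024)
	n, err := conn.Read(reply)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(reply[:n]))
}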