diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 290deaeed628..330b6c7720b5 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -19,18 +19,18 @@ updates: - package-ecosystem: docker directory: /cluster/images/ - target-branch: "release-1.10" + target-branch: "release-1.11" schedule: interval: weekly - package-ecosystem: docker directory: /cluster/images/ - target-branch: "release-1.9" + target-branch: "release-1.10" schedule: interval: weekly - package-ecosystem: docker directory: /cluster/images/ - target-branch: "release-1.8" + target-branch: "release-1.9" schedule: interval: weekly diff --git a/.github/workflows/ci-image-scanning-on-schedule.yml b/.github/workflows/ci-image-scanning-on-schedule.yml new file mode 100644 index 000000000000..948e8389bf98 --- /dev/null +++ b/.github/workflows/ci-image-scanning-on-schedule.yml @@ -0,0 +1,69 @@ +name: image-scanning-on-schedule +on: + schedule: + # Run this workflow "At 00:00 UTC on Sunday" + - cron: '0 0 * * 0' +permissions: + contents: read +jobs: + use-trivy-to-scan-image: + permissions: + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + name: image-scanning + if: ${{ github.repository == 'karmada-io/karmada' }} + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + target: + - karmada-controller-manager + - karmada-scheduler + - karmada-descheduler + - karmada-webhook + - karmada-agent + - karmada-scheduler-estimator + - karmada-interpreter-webhook-example + - karmada-aggregated-apiserver + - karmada-search + - karmada-operator + - karmada-metrics-adapter + karmada-version: [ release-1.11, release-1.10, release-1.9 ] + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + ref: ${{ matrix.karmada-version }} + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - id: gen_git_info + run: | + echo "ref=$(git rev-parse --symbolic-full-name HEAD)" >> "$GITHUB_OUTPUT" + echo "sha=$(git rev-parse HEAD)" >> "$GITHUB_OUTPUT" + - name: Build images from Dockerfile + run: | + export VERSION=${{ matrix.karmada-version }} + export REGISTRY="docker.io/karmada" + make image-${{ matrix.target }} + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@0.28.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}' + format: 'sarif' + ignore-unfixed: true + vuln-type: 'os,library' + output: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif' + - name: display scan results + uses: aquasecurity/trivy-action@0.28.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:${{ matrix.karmada-version }}' + format: 'table' + ignore-unfixed: true + vuln-type: 'os,library' + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: '${{ matrix.target }}:${{ matrix.karmada-version }}.trivy-results.sarif' + ref: ${{steps.gen_git_info.outputs.ref}} + sha: ${{steps.gen_git_info.outputs.sha}} diff --git a/.github/workflows/ci-image-scanning.yaml b/.github/workflows/ci-image-scanning.yaml index 24e51df3214b..72c898e50c33 100644 --- a/.github/workflows/ci-image-scanning.yaml +++ b/.github/workflows/ci-image-scanning.yaml @@ -1,62 +1,62 @@ -name: image-scanning -on: - push: - # Exclude branches created by Dependabot to avoid triggering current workflow - # for PRs initiated by Dependabot. 
- branches-ignore: - - 'dependabot/**' -permissions: - contents: read -jobs: - use-trivy-to-scan-image: - permissions: - security-events: write # for github/codeql-action/upload-sarif to upload SARIF results - name: image-scanning - if: ${{ github.repository == 'karmada-io/karmada' }} - runs-on: ubuntu-22.04 - strategy: - fail-fast: false - matrix: - target: - - karmada-controller-manager - - karmada-scheduler - - karmada-descheduler - - karmada-webhook - - karmada-agent - - karmada-scheduler-estimator - - karmada-interpreter-webhook-example - - karmada-aggregated-apiserver - - karmada-search - - karmada-operator - - karmada-metrics-adapter - steps: - - name: checkout code - uses: actions/checkout@v4 - - name: install Go - uses: actions/setup-go@v5 - with: - go-version-file: go.mod - - name: Build an image from Dockerfile - run: | - export VERSION="latest" - export REGISTRY="docker.io/karmada" - make image-${{ matrix.target }} - - name: Run Trivy vulnerability scanner - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'sarif' - ignore-unfixed: true - vuln-type: 'os,library' - output: 'trivy-results.sarif' - - name: display scan results - uses: aquasecurity/trivy-action@0.24.0 - with: - image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' - format: 'table' - ignore-unfixed: true - vuln-type: 'os,library' - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 - with: - sarif_file: 'trivy-results.sarif' +name: image-scanning +on: + push: + # Exclude branches created by Dependabot to avoid triggering current workflow + # for PRs initiated by Dependabot. + branches-ignore: + - 'dependabot/**' +permissions: + contents: read +jobs: + use-trivy-to-scan-image: + permissions: + security-events: write # for github/codeql-action/upload-sarif to upload SARIF results + name: image-scanning + if: ${{ github.repository == 'karmada-io/karmada' }} + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + target: + - karmada-controller-manager + - karmada-scheduler + - karmada-descheduler + - karmada-webhook + - karmada-agent + - karmada-scheduler-estimator + - karmada-interpreter-webhook-example + - karmada-aggregated-apiserver + - karmada-search + - karmada-operator + - karmada-metrics-adapter + steps: + - name: checkout code + uses: actions/checkout@v4 + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: Build an image from Dockerfile + run: | + export VERSION="latest" + export REGISTRY="docker.io/karmada" + make image-${{ matrix.target }} + - name: Run Trivy vulnerability scanner + uses: aquasecurity/trivy-action@0.28.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'sarif' + ignore-unfixed: true + vuln-type: 'os,library' + output: 'trivy-results.sarif' + - name: display scan results + uses: aquasecurity/trivy-action@0.28.0 + with: + image-ref: 'docker.io/karmada/${{ matrix.target }}:latest' + format: 'table' + ignore-unfixed: true + vuln-type: 'os,library' + - name: Upload Trivy scan results to GitHub Security tab + uses: github/codeql-action/upload-sarif@v3 + with: + sarif_file: 'trivy-results.sarif' diff --git a/.github/workflows/ci-schedule-compatibility.yaml b/.github/workflows/ci-schedule-compatibility.yaml index 33e54a18ee2c..1e318c77875f 100644 --- a/.github/workflows/ci-schedule-compatibility.yaml +++ b/.github/workflows/ci-schedule-compatibility.yaml @@ -19,8 +19,8 @@ jobs: max-parallel: 5 
fail-fast: false matrix: - kubeapiserver-version: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0 ] - karmada-version: [ master, release-1.10, release-1.9, release-1.8 ] + kubeapiserver-version: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ] + karmada-version: [ master, release-1.11, release-1.10, release-1.9 ] env: KARMADA_APISERVER_VERSION: ${{ matrix.kubeapiserver-version }} steps: diff --git a/.github/workflows/ci-schedule.yml b/.github/workflows/ci-schedule.yml index cea428c248b7..91d94bfae07d 100644 --- a/.github/workflows/ci-schedule.yml +++ b/.github/workflows/ci-schedule.yml @@ -19,7 +19,7 @@ jobs: max-parallel: 5 fail-fast: false matrix: - k8s: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0 ] + k8s: [ v1.23.4, v1.24.2, v1.25.0, v1.26.0, v1.27.3, v1.28.0, v1.29.0, v1.30.0, v1.31.0 ] steps: # Free up disk space on Ubuntu - name: Free Disk Space (Ubuntu) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4f38aea07d48..969c44ec03cf 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -116,7 +116,7 @@ jobs: # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/ # Please remember to update the CI Schedule Workflow when we add a new version. - k8s: [ v1.28.0, v1.29.0, v1.30.0 ] + k8s: [ v1.29.0, v1.30.0, v1.31.0 ] steps: # Free up disk space on Ubuntu - name: Free Disk Space (Ubuntu) diff --git a/.github/workflows/cli.yaml b/.github/workflows/cli.yaml index f1e5e6fabdd8..b9071897b9d4 100644 --- a/.github/workflows/cli.yaml +++ b/.github/workflows/cli.yaml @@ -24,7 +24,7 @@ jobs: # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/ # Please remember to update the CI Schedule Workflow when we add a new version. 
- k8s: [ v1.28.0, v1.29.0, v1.30.0 ] + k8s: [ v1.29.0, v1.30.0, v1.31.0 ] steps: - name: checkout code uses: actions/checkout@v4 @@ -36,7 +36,6 @@ jobs: uses: actions/setup-go@v5 with: go-version-file: go.mod - - name: run karmadactl init test run: | export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }} @@ -48,7 +47,7 @@ jobs: export KUBECONFIG=${HOME}/karmada/karmada-apiserver.config GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/ - - name: export logs + - name: export logs if: always() run: | export ARTIFACTS_PATH=${{ github.workspace }}/karmadactl-test-logs/${{ matrix.k8s }}/ @@ -63,3 +62,45 @@ jobs: name: karmadactl_test_logs_${{ matrix.k8s }} path: ${{ github.workspace }}/karmadactl-test-logs/${{ matrix.k8s }}/ + init-config: + name: init with config file + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + # Latest three minor releases of Kubernetes + k8s: [ v1.29.0, v1.30.0, v1.31.0 ] + steps: + - name: checkout code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: run karmadactl init with config file test + run: | + export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }} + + # Run custom test for workload configuration deployment + hack/cli-testing-init-with-config.sh + + # run a single e2e + export KUBECONFIG=${HOME}/karmada/karmada-apiserver.config + GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo + ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/ + - name: export logs for config test + if: always() + run: | + export ARTIFACTS_PATH=${{ github.workspace }}/karmadactl-test-logs/${{ matrix.k8s }}/config + mkdir -p $ARTIFACTS_PATH + + mkdir -p $ARTIFACTS_PATH/karmada-host + kind export logs --name=karmada-host $ARTIFACTS_PATH/karmada-host + - name: upload config test logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: karmadactl_config_test_logs_${{ matrix.k8s }} + path: ${{ github.workspace }}/karmadactl-test-logs/${{ matrix.k8s }}/config/ diff --git a/.github/workflows/dockerhub-latest-image.yml b/.github/workflows/dockerhub-latest-image.yml index 1190b7276514..bdc1de493a87 100644 --- a/.github/workflows/dockerhub-latest-image.yml +++ b/.github/workflows/dockerhub-latest-image.yml @@ -42,7 +42,7 @@ jobs: with: go-version-file: go.mod - name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 with: cosign-release: 'v2.2.3' - name: install QEMU diff --git a/.github/workflows/dockerhub-released-image.yml b/.github/workflows/dockerhub-released-image.yml index 2b9076b4f917..b1ff45d78dfc 100644 --- a/.github/workflows/dockerhub-released-image.yml +++ b/.github/workflows/dockerhub-released-image.yml @@ -38,7 +38,7 @@ jobs: with: go-version-file: go.mod - name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 with: cosign-release: 'v2.2.3' - name: install QEMU diff --git a/.github/workflows/operator.yaml b/.github/workflows/operator.yaml new file mode 100644 index 000000000000..8b1127f8235b --- /dev/null +++ b/.github/workflows/operator.yaml @@ -0,0 +1,82 @@ +name: Operator +on: + # Run this workflow every time a new commit pushed to upstream/fork repository. + # Run workflow on fork repository will help contributors find and resolve issues before sending a PR. 
+ push: + # Exclude branches created by Dependabot to avoid triggering current workflow + # for PRs initiated by Dependabot. + branches-ignore: + - 'dependabot/**' + pull_request: +# https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency +concurrency: + group: ${{ github.workflow }}-${{ github.actor }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true +permissions: + contents: read # for actions/checkout to fetch code +jobs: + test-on-kubernetes-matrix: + name: Test on Kubernetes + runs-on: ubuntu-22.04 + strategy: + fail-fast: false + matrix: + # Here support the latest three minor releases of Kubernetes, this can be considered to be roughly + # the same as the End of Life of the Kubernetes release: https://kubernetes.io/releases/ + # Please remember to update the CI Schedule Workflow when we add a new version. + k8s: [ v1.29.0, v1.30.0, v1.31.0 ] + steps: + # Free up disk space on Ubuntu + - name: Free Disk Space (Ubuntu) + uses: jlumbroso/free-disk-space@main + with: + # this might remove tools that are actually needed, if set to "true" but frees about 6 GB + tool-cache: false + # all of these default to true, but feel free to set to "false" if necessary for your workflow + android: true + dotnet: true + haskell: true + large-packages: false + docker-images: false + swap-storage: false + - name: checkout code + uses: actions/checkout@v4 + with: + # Number of commits to fetch. 0 indicates all history for all branches and tags. + # We need to guess version via git tags. + fetch-depth: 0 + - name: install Go + uses: actions/setup-go@v5 + with: + go-version-file: go.mod + - name: setup operator test environment + run: | + export CLUSTER_VERSION=kindest/node:${{ matrix.k8s }} + hack/local-up-karmada-by-operator.sh + - name: run operator test + run: | + # run a single e2e + export KUBECONFIG=${HOME}/.kube/karmada.config + kubectl config use-context karmada-apiserver + GO111MODULE=on go install github.com/onsi/ginkgo/v2/ginkgo + ginkgo -v --race --trace -p --focus="[BasicPropagation] propagation testing deployment propagation testing" ./test/e2e/ + - name: export logs + if: always() + run: | + export ARTIFACTS_PATH=${{ github.workspace }}/karmada-operator-test-logs/${{ matrix.k8s }}/ + mkdir -p $ARTIFACTS_PATH + + mkdir -p $ARTIFACTS_PATH/karmada-host + kind export logs --name=karmada-host $ARTIFACTS_PATH/karmada-host + - name: upload logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: karmada_operator_test_logs_${{ matrix.k8s }} + path: ${{ github.workspace }}/karmada-operator-test-logs/${{ matrix.k8s }}/ + - name: upload kind logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: karmada_kind_log_${{ matrix.k8s }} + path: /tmp/karmada/ diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 30506de306f7..1cf5b8a4d767 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -167,12 +167,12 @@ jobs: steps: - uses: actions/checkout@v4 - name: Generate sbom for karmada file system - uses: aquasecurity/trivy-action@0.24.0 + uses: aquasecurity/trivy-action@0.28.0 with: scan-type: 'fs' format: 'spdx' output: 'sbom-karmada.spdx' - scan-ref: "/github/workspace/" + scan-ref: "${{ github.workspace }}/" - name: Tar the sbom files run: | tar -zcf sbom.tar.gz *.spdx @@ -200,11 +200,24 @@ jobs: base64-subjects: "${{ needs.sbom-assests.outputs.hashes }}" provenance-name: "karmada-sbom.intoto.jsonl" upload-assets: true + update-krew-index: - needs: release-assests + 
env: + GH_TOKEN: ${{ github.token }} + needs: + - release-assests name: Update krew-index runs-on: ubuntu-22.04 steps: + - name: get latest tag + id: get-latest-tag + run: | + export LATEST_TAG=`gh api repos/karmada-io/karmada/releases/latest | jq -r '.tag_name'` + echo "Got the latest tag:$LATEST_TAG" + echo "event.tag:"${{ github.event.release.tag_name }} + echo "latestTag=$LATEST_TAG" >> "$GITHUB_OUTPUT" - uses: actions/checkout@v4 + if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name - name: Update new version in krew-index + if: steps.get-latest-tag.outputs.latestTag == github.event.release.tag_name uses: rajatjindal/krew-release-bot@v0.0.46 diff --git a/.go-version b/.go-version index 013173af5e9b..87b26e8b1aa0 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.22.6 +1.22.7 diff --git a/CHANGELOG.md b/CHANGELOG.md index 97a5c655f39a..112d5d7d28c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,3 @@ # CHANGELOGs -- [CHANGELOG-1.1.md](./docs/CHANGELOG/CHANGELOG-1.1.md) -- [CHANGELOG-1.0.md](./docs/CHANGELOG/CHANGELOG-1.0.md) -- [CHANGELOG-0.10.md](./docs/CHANGELOG/CHANGELOG-0.10.md) -- [CHANGELOG-0.9.md](./docs/CHANGELOG/CHANGELOG-0.9.md) +[CHANGELOG](./docs/CHANGELOG/) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 39bdc00820c4..0534e67a4663 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -18691,6 +18691,36 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldOverrider": { + "description": "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.", + "type": "object", + "required": [ + "fieldPath" + ], + "properties": { + "fieldPath": { + "description": "FieldPath specifies the initial location in the instance document where the operation should take place. The path uses RFC 6901 for navigating into nested structures. 
For example, the path \"/data/db-config.yaml\" specifies the configuration data key named \"db-config.yaml\" in a ConfigMap: \"/data/db-config.yaml\".", + "type": "string", + "default": "" + }, + "json": { + "description": "JSON represents the operations performed on the JSON document specified by the FieldPath.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.JSONPatchOperation" + } + }, + "yaml": { + "description": "YAML represents the operations performed on the YAML document specified by the FieldPath.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.YAMLPatchOperation" + } + } + } + }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldSelector": { "description": "FieldSelector is a field filter.", "type": "object", @@ -18747,6 +18777,30 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.JSONPatchOperation": { + "description": "JSONPatchOperation represents a single field modification operation for JSON format.", + "type": "object", + "required": [ + "subPath", + "operator" + ], + "properties": { + "operator": { + "description": "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". For \"remove\" operation, this field is ignored.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + } + } + }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.LabelAnnotationOverrider": { "description": "LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations", "type": "object", @@ -18871,7 +18925,7 @@ } }, "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Overriders": { - "description": "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - Plaintext", + "description": "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - FieldOverrider - Plaintext", "type": "object", "properties": { "annotationsOverrider": { @@ -18898,6 +18952,14 @@ "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.CommandArgsOverrider" } }, + "fieldOverrider": { + "description": "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. 
The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.", + "type": "array", + "items": { + "default": {}, + "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.FieldOverrider" + } + }, "imageOverrider": { "description": "ImageOverrider represents the rules dedicated to handling image overrides.", "type": "array", @@ -19098,6 +19160,10 @@ "Never" ] }, + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the resource template is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the resource template.\n\nThis setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters.\n\nAdditionally, this setting applies uniformly across all member clusters and will not selectively control preservation on only some clusters.\n\nNote: This setting does not apply to the deletion of the policy itself. When the policy is deleted, the resource templates and their corresponding propagated resources in member clusters will remain unchanged unless explicitly deleted.", + "type": "boolean" + }, "priority": { "description": "Priority indicates the importance of a policy(PropagationPolicy or ClusterPropagationPolicy). A policy will be applied for the matched resource templates if there is no other policies with higher priority at the point of the resource template be processed. Once a resource template has been claimed by a policy, by default it will not be preempted by following policies even with a higher priority. See Preemption for more details.\n\nIn case of two policies have the same priority, the one with a more precise matching rules in ResourceSelectors wins: - matching by name(resourceSelector.name) has higher priority than\n by selector(resourceSelector.labelSelector)\n- matching by selector(resourceSelector.labelSelector) has higher priority\n than by APIVersion(resourceSelector.apiVersion) and Kind(resourceSelector.kind).\nIf there is still no winner at this point, the one with the lower alphabetic order wins, e.g. policy 'bar' has higher priority than 'foo'.\n\nThe higher the value, the higher the priority. Defaults to zero.", "type": "integer", @@ -19288,6 +19354,30 @@ } } }, + "com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.YAMLPatchOperation": { + "description": "YAMLPatchOperation represents a single field modification operation for YAML format.", + "type": "object", + "required": [ + "subPath", + "operator" + ], + "properties": { + "operator": { + "description": "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + "type": "string", + "default": "" + }, + "subPath": { + "description": "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + "type": "string", + "default": "" + }, + "value": { + "description": "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". 
For \"remove\" operation, this field is ignored.", + "$ref": "#/definitions/io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1.JSON" + } + } + }, "com.github.karmada-io.karmada.pkg.apis.remedy.v1alpha1.ClusterAffinity": { "description": "ClusterAffinity represents the filter to select clusters.", "type": "object", @@ -19752,8 +19842,12 @@ "description": "WorkSpec defines the desired state of Work.", "type": "object", "properties": { + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member cluster when the Work object is deleted. If set to true, resources will be preserved on the member cluster. Default is false, which means resources will be deleted along with the Work object.", + "type": "boolean" + }, "suspendDispatching": { - "description": "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.", + "description": "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to the corresponding member cluster, and does not prevent status collection.", "type": "boolean" }, "workload": { @@ -20174,6 +20268,10 @@ "description": "Placement represents the rule for select clusters to propagate resources.", "$ref": "#/definitions/com.github.karmada-io.karmada.pkg.apis.policy.v1alpha1.Placement" }, + "preserveResourcesOnDeletion": { + "description": "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the binding object is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the binding object. This setting applies to all Work objects created under this binding object.", + "type": "boolean" + }, "propagateDeps": { "description": "PropagateDeps tells if relevant resources should be propagated automatically. It is inherited from PropagationPolicy or ClusterPropagationPolicy. 
default false.", "type": "boolean" diff --git a/artifacts/agent/karmada-agent.yaml b/artifacts/agent/karmada-agent.yaml index aad7346a1535..5bcc695759e8 100644 --- a/artifacts/agent/karmada-agent.yaml +++ b/artifacts/agent/karmada-agent.yaml @@ -25,12 +25,13 @@ spec: imagePullPolicy: {{image_pull_policy}} command: - /bin/karmada-agent - - --karmada-kubeconfig=/etc/kubeconfig/karmada-kubeconfig + - --karmada-kubeconfig=/etc/karmada/config/karmada.config - --karmada-context={{karmada_context}} - --cluster-name={{member_cluster_name}} - --cluster-api-endpoint={{member_cluster_api_endpoint}} - --cluster-status-update-frequency=10s - --health-probe-bind-address=0.0.0.0:10357 + - --metrics-bind-address=:8080 - --feature-gates=CustomizedClusterResourceModeling=true,MultiClusterService=true - --v=4 livenessProbe: @@ -42,10 +43,14 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - - name: kubeconfig - mountPath: /etc/kubeconfig + - name: karmada-config + mountPath: /etc/karmada/config volumes: - - name: kubeconfig + - name: karmada-config secret: - secretName: karmada-kubeconfig + secretName: karmada-agent-config diff --git a/artifacts/deploy/bootstrap-token-configuration.yaml b/artifacts/deploy/bootstrap-token-configuration.yaml index 11b496f26980..097718bf6338 100644 --- a/artifacts/deploy/bootstrap-token-configuration.yaml +++ b/artifacts/deploy/bootstrap-token-configuration.yaml @@ -91,12 +91,6 @@ kind: ClusterRole metadata: name: system:karmada:agent rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create - apiGroups: - cluster.karmada.io resources: @@ -106,14 +100,12 @@ rules: - get - list - watch - - patch - - update + - delete - apiGroups: - cluster.karmada.io resources: - clusters/status verbs: - - patch - update - apiGroups: - work.karmada.io @@ -148,17 +140,12 @@ rules: - namespaces verbs: - get - - list - - watch - - create - apiGroups: - "" resources: - secrets verbs: - get - - list - - watch - create - patch - apiGroups: @@ -167,9 +154,7 @@ rules: - leases verbs: - create - - delete - get - - patch - update - apiGroups: - certificates.k8s.io @@ -178,8 +163,6 @@ rules: verbs: - create - get - - list - - watch - apiGroups: - "" resources: @@ -202,3 +185,170 @@ subjects: - apiGroup: rbac.authorization.k8s.io kind: Group name: system:nodes + +# To ensure the agent has the minimal RBAC permissions, the ideal approach is to +# use different RBAC configurations for different agents of member clusters with pull mode. +# Below is the minimal set of RBAC permissions required for a single pull mode member cluster. +# Here are the definitions of the variables used: +# +# - clustername: the name of the member cluster. +# - cluster_namespace: the namespace where the member cluster secrets are stored, default to karmada-cluster. 
+# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRole +# metadata: +# name: system:karmada:agent +# rules: +# - apiGroups: +# - cluster.karmada.io +# resources: +# - clusters +# resourceNames: +# - {{clustername}} +# verbs: +# - create +# - get +# - delete +# - apiGroups: +# - cluster.karmada.io +# resources: +# - clusters +# verbs: +# - list +# - watch +# - apiGroups: +# - cluster.karmada.io +# resources: +# - clusters/status +# resourceNames: +# - {{clustername}} +# verbs: +# - update +# - apiGroups: +# - config.karmada.io +# resources: +# - resourceinterpreterwebhookconfigurations +# - resourceinterpretercustomizations +# verbs: +# - get +# - list +# - watch +# - apiGroups: +# - "" +# resources: +# - namespaces +# verbs: +# - get +# - apiGroups: +# - coordination.k8s.io +# resources: +# - leases +# verbs: +# - create +# - get +# - update +# - apiGroups: +# - certificates.k8s.io +# resources: +# - certificatesigningrequests +# verbs: +# - create +# - get +# - apiGroups: +# - "" +# resources: +# - events +# verbs: +# - create +# - patch +# - update +# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: ClusterRoleBinding +# metadata: +# name: system:karmada:agent +# roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: ClusterRole +# name: system:karmada:agent +# subjects: +# - apiGroup: rbac.authorization.k8s.io +# kind: Group +# name: system:nodes +# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: Role +# metadata: +# name: system:karmada:agent-secret +# namespace: "{{cluster_namespace}}" +# rules: +# - apiGroups: +# - "" +# resources: +# - secrets +# resourceNames: +# - {{clustername}}-impersonator +# - {{clustername}} +# verbs: +# - get +# - create +# - patch +# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: RoleBinding +# metadata: +# name: system:karmada:agent-secret +# namespace: "{{cluster_namespace}}" +# roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: Role +# name: system:karmada:agent-secret +# subjects: +# - apiGroup: rbac.authorization.k8s.io +# kind: Group +# name: system:nodes +# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: Role +# metadata: +# name: system:karmada:agent-work +# namespace: "karmada-es-{{clustername}}" +# rules: +# - apiGroups: +# - work.karmada.io +# resources: +# - works +# verbs: +# - create +# - get +# - list +# - watch +# - update +# - delete +# - apiGroups: +# - work.karmada.io +# resources: +# - works/status +# verbs: +# - patch +# - update +# +# --- +# apiVersion: rbac.authorization.k8s.io/v1 +# kind: RoleBinding +# metadata: +# name: system:karmada:agent-work +# namespace: "karmada-es-{{clustername}}" +# roleRef: +# apiGroup: rbac.authorization.k8s.io +# kind: Role +# name: system:karmada:agent-work +# subjects: +# - apiGroup: rbac.authorization.k8s.io +# kind: Group +# name: system:nodes diff --git a/artifacts/deploy/karmada-aggregated-apiserver.yaml b/artifacts/deploy/karmada-aggregated-apiserver.yaml index 58493c5ceff0..00e651f723ce 100644 --- a/artifacts/deploy/karmada-aggregated-apiserver.yaml +++ b/artifacts/deploy/karmada-aggregated-apiserver.yaml @@ -24,24 +24,17 @@ spec: - name: karmada-aggregated-apiserver image: docker.io/karmada/karmada-aggregated-apiserver:latest imagePullPolicy: IfNotPresent - volumeMounts: - - name: karmada-certs - mountPath: /etc/karmada/pki - readOnly: true - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig command: - /bin/karmada-aggregated-apiserver - - --kubeconfig=/etc/kubeconfig - - 
--authentication-kubeconfig=/etc/kubeconfig -   - --authorization-kubeconfig=/etc/kubeconfig +   - --kubeconfig=/etc/karmada/config/karmada.config +   - --authentication-kubeconfig=/etc/karmada/config/karmada.config +   - --authorization-kubeconfig=/etc/karmada/config/karmada.config    - --etcd-servers=https://etcd-client.karmada-system.svc.cluster.local:2379 -   - --etcd-cafile=/etc/karmada/pki/etcd-ca.crt -   - --etcd-certfile=/etc/karmada/pki/etcd-client.crt -   - --etcd-keyfile=/etc/karmada/pki/etcd-client.key -   - --tls-cert-file=/etc/karmada/pki/karmada.crt -   - --tls-private-key-file=/etc/karmada/pki/karmada.key +   - --etcd-cafile=/etc/karmada/pki/etcd-client/ca.crt +   - --etcd-certfile=/etc/karmada/pki/etcd-client/tls.crt +   - --etcd-keyfile=/etc/karmada/pki/etcd-client/tls.key +   - --tls-cert-file=/etc/karmada/pki/server/tls.crt +   - --tls-private-key-file=/etc/karmada/pki/server/tls.key    - --audit-log-path=-    - --audit-log-maxage=0    - --audit-log-maxbackup=0 @@ -65,13 +58,25 @@ spec:    initialDelaySeconds: 10    periodSeconds: 10    timeoutSeconds: 15 +   volumeMounts: +   - name: karmada-config +     mountPath: /etc/karmada/config +   - name: server-cert +     mountPath: /etc/karmada/pki/server +     readOnly: true +   - name: etcd-client-cert +     mountPath: /etc/karmada/pki/etcd-client +     readOnly: true  volumes: -  - name: karmada-certs +  - name: karmada-config +    secret: +      secretName: karmada-aggregated-apiserver-config +  - name: server-cert      secret: -      secretName: karmada-cert-secret -  - name: kubeconfig +      secretName: karmada-aggregated-apiserver-cert +  - name: etcd-client-cert      secret: -      secretName: kubeconfig +      secretName: karmada-aggregated-apiserver-etcd-client-cert --- apiVersion: v1 kind: Service diff --git a/artifacts/deploy/karmada-apiserver.yaml b/artifacts/deploy/karmada-apiserver.yaml index 296f7b8f0a9a..ef26ae3116b0 100644 --- a/artifacts/deploy/karmada-apiserver.yaml +++ b/artifacts/deploy/karmada-apiserver.yaml @@ -36,32 +36,29 @@ spec:    - kube-apiserver    - --allow-privileged=true    - --authorization-mode=Node,RBAC -   - --client-ca-file=/etc/karmada/pki/ca.crt    - --enable-bootstrap-token-auth=true -   - --etcd-cafile=/etc/karmada/pki/etcd-ca.crt -   - --etcd-certfile=/etc/karmada/pki/etcd-client.crt -   - --etcd-keyfile=/etc/karmada/pki/etcd-client.key +   - --etcd-cafile=/etc/karmada/pki/etcd-client/ca.crt +   - --etcd-certfile=/etc/karmada/pki/etcd-client/tls.crt +   - --etcd-keyfile=/etc/karmada/pki/etcd-client/tls.key    - --etcd-servers=https://etcd-client.karmada-system.svc.cluster.local:2379    - --bind-address=0.0.0.0 -   - --kubelet-client-certificate=/etc/karmada/pki/karmada.crt -   - --kubelet-client-key=/etc/karmada/pki/karmada.key -   - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname    - --disable-admission-plugins=StorageObjectInUseProtection,ServiceAccount    - --runtime-config=    - --secure-port=5443    - --service-account-issuer=https://kubernetes.default.svc.cluster.local -   - --service-account-key-file=/etc/karmada/pki/karmada.key -   - --service-account-signing-key-file=/etc/karmada/pki/karmada.key +   - --service-account-key-file=/etc/karmada/pki/service-account-key-pair/sa.pub +   - --service-account-signing-key-file=/etc/karmada/pki/service-account-key-pair/sa.key    - --service-cluster-ip-range=10.96.0.0/12 -   - --proxy-client-cert-file=/etc/karmada/pki/front-proxy-client.crt -   - --proxy-client-key-file=/etc/karmada/pki/front-proxy-client.key +   - --proxy-client-cert-file=/etc/karmada/pki/front-proxy-client/tls.crt +   - --proxy-client-key-file=/etc/karmada/pki/front-proxy-client/tls.key + - 
--requestheader-client-ca-file=/etc/karmada/pki/front-proxy-client/ca.crt - --requestheader-allowed-names=front-proxy-client - - --requestheader-client-ca-file=/etc/karmada/pki/front-proxy-ca.crt - --requestheader-extra-headers-prefix=X-Remote-Extra- - --requestheader-group-headers=X-Remote-Group - --requestheader-username-headers=X-Remote-User - - --tls-cert-file=/etc/karmada/pki/apiserver.crt - - --tls-private-key-file=/etc/karmada/pki/apiserver.key + - --tls-cert-file=/etc/karmada/pki/server/tls.crt + - --tls-private-key-file=/etc/karmada/pki/server/tls.key + - --client-ca-file=/etc/karmada/pki/server/ca.crt - --tls-min-version=VersionTLS13 name: karmada-apiserver image: registry.k8s.io/kube-apiserver:{{karmada_apiserver_version}} @@ -91,9 +88,31 @@ spec: terminationMessagePath: /dev/termination-log terminationMessagePolicy: File volumeMounts: - - mountPath: /etc/karmada/pki - name: karmada-certs + - name: server-cert + mountPath: /etc/karmada/pki/server readOnly: true + - name: etcd-client-cert + mountPath: /etc/karmada/pki/etcd-client + readOnly: true + - name: front-proxy-client-cert + mountPath: /etc/karmada/pki/front-proxy-client + readOnly: true + - name: service-account-key-pair + mountPath: /etc/karmada/pki/service-account-key-pair + readOnly: true + volumes: + - name: server-cert + secret: + secretName: karmada-apiserver-cert + - name: etcd-client-cert + secret: + secretName: karmada-apiserver-etcd-client-cert + - name: front-proxy-client-cert + secret: + secretName: karmada-apiserver-front-proxy-client-cert + - name: service-account-key-pair + secret: + secretName: karmada-apiserver-service-account-key-pair dnsPolicy: ClusterFirstWithHostNet enableServiceLinks: true hostNetwork: true @@ -107,10 +126,6 @@ spec: tolerations: - effect: NoExecute operator: Exists - volumes: - - name: karmada-certs - secret: - secretName: karmada-cert-secret --- apiVersion: v1 kind: Service diff --git a/artifacts/deploy/karmada-webhook-cert-secret.yaml b/artifacts/deploy/karmada-ca-cert-secret.yaml similarity index 64% rename from artifacts/deploy/karmada-webhook-cert-secret.yaml rename to artifacts/deploy/karmada-ca-cert-secret.yaml index aabdeedc2ef2..afe566040e0e 100644 --- a/artifacts/deploy/karmada-webhook-cert-secret.yaml +++ b/artifacts/deploy/karmada-ca-cert-secret.yaml @@ -1,11 +1,11 @@ apiVersion: v1 kind: Secret metadata: - name: webhook-cert + name: ${component}-ca-cert namespace: karmada-system type: kubernetes.io/tls data: tls.crt: | - {{server_certificate}} + ${ca_crt} tls.key: | - {{server_key}} + ${ca_key} diff --git a/artifacts/deploy/karmada-cert-secret.yaml b/artifacts/deploy/karmada-cert-secret.yaml index 2a32ae4b2eb5..344a65fe14f8 100644 --- a/artifacts/deploy/karmada-cert-secret.yaml +++ b/artifacts/deploy/karmada-cert-secret.yaml @@ -1,35 +1,13 @@ apiVersion: v1 kind: Secret metadata: - name: karmada-cert-secret + name: ${name}-cert namespace: karmada-system -type: Opaque +type: kubernetes.io/tls data: ca.crt: | - {{ca_crt}} - ca.key: | - {{ca_key}} - karmada.crt: | - {{client_crt}} - karmada.key: | - {{client_key}} - apiserver.crt: | - {{apiserver_crt}} - apiserver.key: | - {{apiserver_key}} - front-proxy-ca.crt: | - {{front_proxy_ca_crt}} - front-proxy-client.crt: | - {{front_proxy_client_crt}} - front-proxy-client.key: | - {{front_proxy_client_key}} - etcd-ca.crt: | - {{etcd_ca_crt}} - etcd-server.crt: | - {{etcd_server_crt}} - etcd-server.key: | - {{etcd_server_key}} - etcd-client.crt: | - {{etcd_client_crt}} - etcd-client.key: | - {{etcd_client_key}} + ${ca_crt} + 
tls.crt: | + ${tls_crt} + tls.key: | + ${tls_key} diff --git a/artifacts/deploy/karmada-config-secret.yaml b/artifacts/deploy/karmada-config-secret.yaml new file mode 100644 index 000000000000..bb4faae32f86 --- /dev/null +++ b/artifacts/deploy/karmada-config-secret.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ${component}-config + namespace: karmada-system +stringData: + karmada.config: |- + apiVersion: v1 + kind: Config + preferences: {} + clusters: + - name: karmada-apiserver + cluster: + certificate-authority-data: ${ca_crt} + server: https://karmada-apiserver.karmada-system.svc.cluster.local:5443 + users: + - name: karmada-apiserver + user: + client-certificate-data: ${client_crt} + client-key-data: ${client_key} + contexts: + - name: karmada-apiserver + context: + cluster: karmada-apiserver + user: karmada-apiserver + current-context: karmada-apiserver diff --git a/artifacts/deploy/karmada-controller-manager.yaml b/artifacts/deploy/karmada-controller-manager.yaml index 5771b1d4ad3c..4f75df36533c 100644 --- a/artifacts/deploy/karmada-controller-manager.yaml +++ b/artifacts/deploy/karmada-controller-manager.yaml @@ -25,13 +25,13 @@ spec: imagePullPolicy: IfNotPresent command: - /bin/karmada-controller-manager - - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 + - --kubeconfig=/etc/karmada/config/karmada.config + - --metrics-bind-address=:8080 - --cluster-status-update-frequency=10s - - --secure-port=10357 - --failover-eviction-timeout=30s - --controllers=*,hpaScaleTargetMarker,deploymentReplicasSyncer - --feature-gates=PropagationPolicyPreemption=true,MultiClusterService=true + - --health-probe-bind-address=0.0.0.0:10357 - --v=4 livenessProbe: httpGet: @@ -42,11 +42,14 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig + - name: karmada-config + mountPath: /etc/karmada/config volumes: - - name: kubeconfig + - name: karmada-config secret: - secretName: kubeconfig + secretName: karmada-controller-manager-config diff --git a/artifacts/deploy/karmada-descheduler.yaml b/artifacts/deploy/karmada-descheduler.yaml index 53d0d189bd7c..46a0f48516be 100644 --- a/artifacts/deploy/karmada-descheduler.yaml +++ b/artifacts/deploy/karmada-descheduler.yaml @@ -25,11 +25,12 @@ spec: imagePullPolicy: IfNotPresent command: - /bin/karmada-descheduler - - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt - - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt - - --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key + - --kubeconfig=/etc/karmada/config/karmada.config + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10358 + - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt + - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt + - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key - --v=4 livenessProbe: httpGet: @@ -40,17 +41,20 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - - name: karmada-certs - mountPath: /etc/karmada/pki + - name: karmada-config + mountPath: /etc/karmada/config + - name: scheduler-estimator-client-cert + mountPath: /etc/karmada/pki/scheduler-estimator-client readOnly: true - - name: 
kubeconfig -     subPath: kubeconfig -     mountPath: /etc/kubeconfig  volumes: -  - name: karmada-certs +  - name: karmada-config      secret: -      secretName: karmada-cert-secret -  - name: kubeconfig +      secretName: karmada-descheduler-config +  - name: scheduler-estimator-client-cert      secret: -      secretName: kubeconfig +      secretName: karmada-descheduler-scheduler-estimator-client-cert diff --git a/artifacts/deploy/karmada-etcd.yaml b/artifacts/deploy/karmada-etcd.yaml index d429700b0ebf..d2f135452837 100644 --- a/artifacts/deploy/karmada-etcd.yaml +++ b/artifacts/deploy/karmada-etcd.yaml @@ -40,7 +40,7 @@ spec:    command:    - /bin/sh    - -ec -   - 'etcdctl get /registry --prefix --keys-only --endpoints https://127.0.0.1:2379 --cacert /etc/karmada/pki/etcd-ca.crt --cert /etc/karmada/pki/etcd-server.crt --key /etc/karmada/pki/etcd-server.key' +   - 'etcdctl get /registry --prefix --keys-only --endpoints https://127.0.0.1:2379 --cacert /etc/karmada/pki/etcd-client/ca.crt --cert /etc/karmada/pki/etcd-client/tls.crt --key /etc/karmada/pki/etcd-client/tls.key'    failureThreshold: 3    initialDelaySeconds: 600    periodSeconds: 60 @@ -53,11 +53,6 @@ spec:    - containerPort: 2380      name: server      protocol: TCP -   volumeMounts: -   - mountPath: /var/lib/etcd -     name: etcd-data -   - mountPath: /etc/karmada/pki -     name: etcd-certs    resources:      requests:        cpu: 100m @@ -76,24 +71,34 @@ spec:    - etcd0=http://etcd-0.etcd.karmada-system.svc.cluster.local:2380    - --initial-cluster-state    - new -   - --cert-file=/etc/karmada/pki/etcd-server.crt    - --client-cert-auth=true -   - --key-file=/etc/karmada/pki/etcd-server.key -   - --trusted-ca-file=/etc/karmada/pki/etcd-ca.crt +   - --cert-file=/etc/karmada/pki/server/tls.crt +   - --key-file=/etc/karmada/pki/server/tls.key +   - --trusted-ca-file=/etc/karmada/pki/server/ca.crt    - --data-dir=/var/lib/etcd    - --snapshot-count=10000    # Setting Golang's secure cipher suites as etcd's cipher suites.    # They are obtained by the return value of the function CipherSuites() under the go/src/crypto/tls/cipher_suites.go package.    # Consistent with the Preferred values of k8s’s default cipher suites. 
- --cipher-suites=TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_RSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256,TLS_AES_256_GCM_SHA384,TLS_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 + volumeMounts: + - name: etcd-data + mountPath: /var/lib/etcd + - name: server-cert + mountPath: /etc/karmada/pki/server + - name: etcd-client-cert + mountPath: /etc/karmada/pki/etcd-client volumes: - - hostPath: + - name: etcd-data + hostPath: path: /var/lib/karmada-etcd type: DirectoryOrCreate - name: etcd-data - - name: etcd-certs + - name: server-cert + secret: + secretName: etcd-cert + - name: etcd-client-cert secret: - secretName: karmada-cert-secret + secretName: etcd-etcd-client-cert --- apiVersion: v1 diff --git a/artifacts/deploy/karmada-key-pair-secret.yaml b/artifacts/deploy/karmada-key-pair-secret.yaml new file mode 100644 index 000000000000..58bb203f168f --- /dev/null +++ b/artifacts/deploy/karmada-key-pair-secret.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Secret +metadata: + name: ${component}-service-account-key-pair + namespace: karmada-system +type: Opaque +data: + sa.pub: | + ${sa_pub} + sa.key: | + ${sa_key} diff --git a/artifacts/deploy/karmada-metrics-adapter.yaml b/artifacts/deploy/karmada-metrics-adapter.yaml index 437b18c419b0..678edb13baf5 100644 --- a/artifacts/deploy/karmada-metrics-adapter.yaml +++ b/artifacts/deploy/karmada-metrics-adapter.yaml @@ -24,21 +24,14 @@ spec: - name: karmada-metrics-adapter image: docker.io/karmada/karmada-metrics-adapter:latest imagePullPolicy: IfNotPresent - volumeMounts: - - name: karmada-certs - mountPath: /etc/karmada/pki - readOnly: true - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig command: - /bin/karmada-metrics-adapter - - --kubeconfig=/etc/kubeconfig - - --authentication-kubeconfig=/etc/kubeconfig - - --authorization-kubeconfig=/etc/kubeconfig - - --client-ca-file=/etc/karmada/pki/ca.crt - - --tls-cert-file=/etc/karmada/pki/karmada.crt - - --tls-private-key-file=/etc/karmada/pki/karmada.key + - --kubeconfig=/etc/karmada/config/karmada.config + - --authentication-kubeconfig=/etc/karmada/config/karmada.config + - --authorization-kubeconfig=/etc/karmada/config/karmada.config + - --client-ca-file=/etc/karmada/pki/server/ca.crt + - --tls-cert-file=/etc/karmada/pki/server/tls.crt + - --tls-private-key-file=/etc/karmada/pki/server/tls.key - --audit-log-path=- - --audit-log-maxage=0 - --audit-log-maxbackup=0 @@ -64,13 +57,19 @@ spec: resources: requests: cpu: 100m + volumeMounts: + - name: karmada-config + mountPath: /etc/karmada/config + - name: server-cert + mountPath: /etc/karmada/pki/server + readOnly: true volumes: - - name: karmada-certs + - name: karmada-config secret: - secretName: karmada-cert-secret - - name: kubeconfig + secretName: karmada-metrics-adapter-config + - name: server-cert secret: - secretName: kubeconfig + secretName: karmada-metrics-adapter-cert --- apiVersion: v1 kind: Service diff --git a/artifacts/deploy/karmada-scheduler-estimator.yaml b/artifacts/deploy/karmada-scheduler-estimator.yaml index 
85d6111f5aae..e44ef8c3a216 100644 --- a/artifacts/deploy/karmada-scheduler-estimator.yaml +++ b/artifacts/deploy/karmada-scheduler-estimator.yaml @@ -27,9 +27,11 @@ spec:    - /bin/karmada-scheduler-estimator    - --kubeconfig=/etc/{{member_cluster_name}}-kubeconfig    - --cluster-name={{member_cluster_name}} -   - --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt -   - --grpc-auth-key-file=/etc/karmada/pki/karmada.key -   - --grpc-client-ca-file=/etc/karmada/pki/ca.crt +   - --grpc-auth-cert-file=/etc/karmada/pki/server/tls.crt +   - --grpc-auth-key-file=/etc/karmada/pki/server/tls.key +   - --grpc-client-ca-file=/etc/karmada/pki/server/ca.crt +   - --metrics-bind-address=0.0.0.0:8080 +   - --health-probe-bind-address=0.0.0.0:10351    livenessProbe:      httpGet:        path: /healthz @@ -39,17 +41,21 @@ spec:    initialDelaySeconds: 15    periodSeconds: 15    timeoutSeconds: 5 +   ports: +   - containerPort: 8080 +     name: metrics +     protocol: TCP    volumeMounts: -   - name: karmada-certs -     mountPath: /etc/karmada/pki +   - name: server-cert +     mountPath: /etc/karmada/pki/server      readOnly: true    - name: member-kubeconfig      subPath: {{member_cluster_name}}-kubeconfig      mountPath: /etc/{{member_cluster_name}}-kubeconfig  volumes: -  - name: karmada-certs +  - name: server-cert      secret: -      secretName: karmada-cert-secret +      secretName: karmada-scheduler-estimator-cert    - name: member-kubeconfig      secret:        secretName: {{member_cluster_name}}-kubeconfig diff --git a/artifacts/deploy/karmada-scheduler.yaml b/artifacts/deploy/karmada-scheduler.yaml index f863fba55a3a..1604b9c9e1d3 100644 --- a/artifacts/deploy/karmada-scheduler.yaml +++ b/artifacts/deploy/karmada-scheduler.yaml @@ -32,27 +32,30 @@ spec:    initialDelaySeconds: 15    periodSeconds: 15    timeoutSeconds: 5 +   ports: +   - containerPort: 8080 +     name: metrics +     protocol: TCP    command:    - /bin/karmada-scheduler -   - --kubeconfig=/etc/kubeconfig -   - --bind-address=0.0.0.0 -   - --secure-port=10351 +   - --kubeconfig=/etc/karmada/config/karmada.config +   - --metrics-bind-address=0.0.0.0:8080 +   - --health-probe-bind-address=0.0.0.0:10351    - --enable-scheduler-estimator=true -   - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt -   - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt -   - --scheduler-estimator-key-file=/etc/karmada/pki/karmada.key +   - --scheduler-estimator-ca-file=/etc/karmada/pki/scheduler-estimator-client/ca.crt +   - --scheduler-estimator-cert-file=/etc/karmada/pki/scheduler-estimator-client/tls.crt +   - --scheduler-estimator-key-file=/etc/karmada/pki/scheduler-estimator-client/tls.key    - --v=4    volumeMounts: -   - name: karmada-certs -     mountPath: /etc/karmada/pki +   - name: karmada-config +     mountPath: /etc/karmada/config +   - name: scheduler-estimator-client-cert +     mountPath: /etc/karmada/pki/scheduler-estimator-client      readOnly: true -   - name: kubeconfig -     subPath: kubeconfig -     mountPath: /etc/kubeconfig  volumes: -  - name: karmada-certs +  - name: karmada-config      secret: -      secretName: karmada-cert-secret -  - name: kubeconfig +      secretName: karmada-scheduler-config +  - name: scheduler-estimator-client-cert      secret: -      secretName: kubeconfig +      secretName: karmada-scheduler-scheduler-estimator-client-cert diff --git a/artifacts/deploy/karmada-search.yaml b/artifacts/deploy/karmada-search.yaml index b972096f05dc..5c18e788bb0f 100644 --- a/artifacts/deploy/karmada-search.yaml +++ b/artifacts/deploy/karmada-search.yaml @@ -24,24 +24,17 @@ spec:    - name: karmada-search      image: docker.io/karmada/karmada-search:latest      imagePullPolicy: IfNotPresent -     volumeMounts: -     - name: karmada-certs -       mountPath: /etc/karmada/pki -       readOnly: 
true - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig command: - /bin/karmada-search - - --kubeconfig=/etc/kubeconfig - - --authentication-kubeconfig=/etc/kubeconfig - - --authorization-kubeconfig=/etc/kubeconfig + - --kubeconfig=/etc/karmada/config/karmada.config + - --authentication-kubeconfig=/etc/karmada/config/karmada.config + - --authorization-kubeconfig=/etc/karmada/config/karmada.config - --etcd-servers=https://etcd-client.karmada-system.svc.cluster.local:2379 - - --etcd-cafile=/etc/karmada/pki/etcd-ca.crt - - --etcd-certfile=/etc/karmada/pki/etcd-client.crt - - --etcd-keyfile=/etc/karmada/pki/etcd-client.key - - --tls-cert-file=/etc/karmada/pki/karmada.crt - - --tls-private-key-file=/etc/karmada/pki/karmada.key + - --etcd-cafile=/etc/karmada/pki/etcd-client/ca.crt + - --etcd-certfile=/etc/karmada/pki/etcd-client/tls.crt + - --etcd-keyfile=/etc/karmada/pki/etcd-client/tls.key + - --tls-cert-file=/etc/karmada/pki/server/tls.crt + - --tls-private-key-file=/etc/karmada/pki/server/tls.key - --audit-log-path=- - --audit-log-maxage=0 - --audit-log-maxbackup=0 @@ -58,13 +51,25 @@ spec: resources: requests: cpu: 100m + volumeMounts: + - name: karmada-config + mountPath: /etc/karmada/config + - name: server-cert + mountPath: /etc/karmada/pki/server + readOnly: true + - name: etcd-client-cert + mountPath: /etc/karmada/pki/etcd-client + readOnly: true volumes: - - name: karmada-certs + - name: karmada-config + secret: + secretName: karmada-search-config + - name: server-cert secret: - secretName: karmada-cert-secret - - name: kubeconfig + secretName: karmada-search-cert + - name: etcd-client-cert secret: - secretName: kubeconfig + secretName: karmada-search-etcd-client-cert --- apiVersion: v1 kind: Service diff --git a/artifacts/deploy/karmada-webhook.yaml b/artifacts/deploy/karmada-webhook.yaml index 2201cbe20ad8..850bdc5b2142 100644 --- a/artifacts/deploy/karmada-webhook.yaml +++ b/artifacts/deploy/karmada-webhook.yaml @@ -25,34 +25,37 @@ spec: imagePullPolicy: IfNotPresent command: - /bin/karmada-webhook - - --kubeconfig=/etc/kubeconfig + - --kubeconfig=/etc/karmada/config/karmada.config - --bind-address=0.0.0.0 + - --metrics-bind-address=:8080 - --default-not-ready-toleration-seconds=30 - --default-unreachable-toleration-seconds=30 - --secure-port=8443 - - --cert-dir=/var/serving-cert + - --cert-dir=/etc/karmada/pki/server - --v=4 ports: - containerPort: 8443 - volumeMounts: - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig - - name: cert - mountPath: /var/serving-cert - readOnly: true + - containerPort: 8080 + name: metrics + protocol: TCP readinessProbe: httpGet: path: /readyz port: 8443 scheme: HTTPS + volumeMounts: + - name: karmada-config + mountPath: /etc/karmada/config + - name: server-cert + mountPath: /etc/karmada/pki/server + readOnly: true volumes: - - name: kubeconfig + - name: karmada-config secret: - secretName: kubeconfig - - name: cert + secretName: karmada-webhook-config + - name: server-cert secret: - secretName: webhook-cert + secretName: karmada-webhook-cert --- apiVersion: v1 kind: Service diff --git a/artifacts/deploy/kube-controller-manager.yaml b/artifacts/deploy/kube-controller-manager.yaml index 205759193f3c..1eb16c7bebd5 100644 --- a/artifacts/deploy/kube-controller-manager.yaml +++ b/artifacts/deploy/kube-controller-manager.yaml @@ -31,24 +31,28 @@ spec: values: - kube-controller-manager topologyKey: kubernetes.io/hostname + priorityClassName: system-node-critical containers: + # --client-ca-file verifies the cert of 
diff --git a/artifacts/deploy/kube-controller-manager.yaml b/artifacts/deploy/kube-controller-manager.yaml
index 205759193f3c..1eb16c7bebd5 100644
--- a/artifacts/deploy/kube-controller-manager.yaml
+++ b/artifacts/deploy/kube-controller-manager.yaml
@@ -31,24 +31,28 @@ spec:
                     values:
                       - kube-controller-manager
             topologyKey: kubernetes.io/hostname
+      priorityClassName: system-node-critical
       containers:
+        # --client-ca-file verifies the certificates of its clients, such as the kubelet and other controllers
+        # --cluster-signing-key-file is used for signing certificates
+        # --root-ca-file is included in service-account token Secrets
        - command:
            - kube-controller-manager
            - --allocate-node-cidrs=true
-           - --authentication-kubeconfig=/etc/kubeconfig
-           - --authorization-kubeconfig=/etc/kubeconfig
+           - --kubeconfig=/etc/karmada/config/karmada.config
+           - --authentication-kubeconfig=/etc/karmada/config/karmada.config
+           - --authorization-kubeconfig=/etc/karmada/config/karmada.config
            - --bind-address=0.0.0.0
-           - --client-ca-file=/etc/karmada/pki/ca.crt
+           - --client-ca-file=/etc/karmada/pki/ca/tls.crt
            - --cluster-cidr=10.244.0.0/16
            - --cluster-name=karmada
-           - --cluster-signing-cert-file=/etc/karmada/pki/ca.crt
-           - --cluster-signing-key-file=/etc/karmada/pki/ca.key
+           - --cluster-signing-cert-file=/etc/karmada/pki/ca/tls.crt
+           - --cluster-signing-key-file=/etc/karmada/pki/ca/tls.key
            - --controllers=namespace,garbagecollector,serviceaccount-token,ttl-after-finished,bootstrapsigner,tokencleaner,csrapproving,csrcleaner,csrsigning,clusterrole-aggregation
-           - --kubeconfig=/etc/kubeconfig
            - --leader-elect=true
            - --node-cidr-mask-size=24
-           - --root-ca-file=/etc/karmada/pki/ca.crt
-           - --service-account-private-key-file=/etc/karmada/pki/karmada.key
+           - --root-ca-file=/etc/karmada/pki/ca/tls.crt
+           - --service-account-private-key-file=/etc/karmada/pki/service-account-key-pair/sa.key
            - --service-cluster-ip-range=10.96.0.0/12
            - --use-service-account-credentials=true
            - --v=4
@@ -69,17 +73,21 @@ spec:
            requests:
              cpu: 200m
          volumeMounts:
-           - mountPath: /etc/karmada/pki
-             name: karmada-certs
+           - name: karmada-config
+             mountPath: /etc/karmada/config
+           - name: ca-cert
+             mountPath: /etc/karmada/pki/ca
+             readOnly: true
+           - name: service-account-key-pair
+             mountPath: /etc/karmada/pki/service-account-key-pair
              readOnly: true
-           - mountPath: /etc/kubeconfig
-             subPath: kubeconfig
-             name: kubeconfig
-      priorityClassName: system-node-critical
       volumes:
-        - name: karmada-certs
+        - name: karmada-config
+          secret:
+            secretName: kube-controller-manager-config
+        - name: ca-cert
           secret:
-            secretName: karmada-cert-secret
-        - name: kubeconfig
+            secretName: kube-controller-manager-ca-cert
+        - name: service-account-key-pair
           secret:
-            secretName: kubeconfig
+            secretName: kube-controller-manager-service-account-key-pair
diff --git a/artifacts/deploy/secret.yaml b/artifacts/deploy/secret.yaml
deleted file mode 100644
index be55726f8a81..000000000000
--- a/artifacts/deploy/secret.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
-apiVersion: v1
-stringData:
-  kubeconfig: |-
-    apiVersion: v1
-    clusters:
-      - cluster:
-          certificate-authority-data: {{ca_crt}}
-          server: https://karmada-apiserver.karmada-system.svc.cluster.local:5443
-        name: kind-karmada
-    contexts:
-      - context:
-          cluster: kind-karmada
-          user: kind-karmada
-        name: karmada
-    current-context: karmada
-    kind: Config
-    preferences: {}
-    users:
-      - name: kind-karmada
-        user:
-          client-certificate-data: {{client_crt}}
-          client-key-data: {{client_key}}
-kind: Secret
-metadata:
-  name: kubeconfig
-  namespace: karmada-system
diff --git a/charts/OWNERS b/charts/OWNERS
index 8c8c68b3569a..312437cdad10 100644
--- a/charts/OWNERS
+++ b/charts/OWNERS
@@ -1,9 +1,12 @@
 reviewers:
+- a7i
 - chaosi-zju
 - calvin0327
 - jrkeen
 - pidb
 - Poor12
 approvers:
+- a7i
+- chaosi-zju
 - pidb
 - Poor12
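The kube-controller-manager manifest above now sources the service-account signing key from its own Secret (`sa.key` under `/etc/karmada/pki/service-account-key-pair`). A hedged sketch of that Secret follows; the `sa.key` filename comes from the `--service-account-private-key-file` flag, while the `sa.pub` entry is an assumption mirroring the usual API-server pairing.

```yaml
# Sketch only: the secret name and sa.key come from the manifest above;
# sa.pub is an assumed companion key used to verify the signed tokens.
apiVersion: v1
kind: Secret
metadata:
  name: kube-controller-manager-service-account-key-pair
  namespace: karmada-system
stringData:
  sa.key: "<PEM-encoded RSA private key used to sign service-account tokens>"
  sa.pub: "<PEM-encoded public key used to verify the signed tokens>"
```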
diff --git a/charts/index.yaml b/charts/index.yaml
index 06968d2d38f4..d92b1578721b 100644
--- a/charts/index.yaml
+++ b/charts/index.yaml
@@ -2,7 +2,47 @@ apiVersion: v1
 entries:
   karmada:
   - apiVersion: v2
-    appVersion: latest
+    appVersion: v1.1.0
+    created: "2024-09-21T12:09:38.421759709+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 2.x.x
+    description: A Helm chart for karmada
+    digest: d4d9bbbda5ac03a4ba9349b17973b3df9047849398735dcb36cec4ec39140046
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.11.0/karmada-chart-v1.11.0.tgz
+    version: v1.11.0
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-09-21T12:04:45.631679376+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 2.x.x
+    description: A Helm chart for karmada
+    digest: c381332dfebd6a4473cfcdb68abfca4916d415088cc90707b7336001e8cb20db
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.10.0/karmada-chart-v1.10.0.tgz
+    version: v1.10.0
+  - apiVersion: v2
+    appVersion: v1.1.0
     created: "2024-02-29T16:55:17.965911-05:00"
     dependencies:
     - name: common
@@ -242,6 +282,68 @@ entries:
     - https://github.com/karmada-io/karmada/releases/download/v1.2.0/karmada-chart-v1.2.0.tgz
     version: v1.2.0
   karmada-operator:
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-09-21T15:01:05.712207268+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 1.x.x
+    description: A Helm chart for karmada-operator
+    digest: d0aecd28e92ebfa5e9cb4836a38b878c3355f2f068f3645b7152a00625c168c6
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: wen.chen@daocloud.io
+      name: calvin0327
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada-operator
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.11.0/karmada-operator-chart-v1.11.0.tgz
+    version: v1.11.0
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-09-21T14:59:09.729691529+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 1.x.x
+    description: A Helm chart for karmada-operator
+    digest: a8794bcbfcf96d5ad2a7f7e976fac4d599437858f05d94acd7bf6c493ec6401e
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: wen.chen@daocloud.io
+      name: calvin0327
+    - email: chaosi@zju.edu.cn
+      name: chaosi-zju
+    - email: amiralavi7@gmail.com
+      name: a7i
+    name: karmada-operator
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.10.0/karmada-operator-chart-v1.10.0.tgz
+    version: v1.10.0
+  - apiVersion: v2
+    appVersion: v1.1.0
+    created: "2024-09-21T14:56:28.687039261+08:00"
+    dependencies:
+    - name: common
+      repository: https://charts.bitnami.com/bitnami
+      version: 1.x.x
+    description: A Helm chart for karmada-operator
+    digest: 609743e4c8bbe381fbc822e0957ccb1ba47ff5a84f33f86ee16f9f3c6d4c7eed
+    kubeVersion: '>= 1.16.0-0'
+    maintainers:
+    - email: wen.chen@daocloud.io
+      name: calvin0327
+    name: karmada-operator
+    type: application
+    urls:
+    - https://github.com/karmada-io/karmada/releases/download/v1.9.0/karmada-operator-chart-v1.9.0.tgz
+    version: v1.9.0
   - apiVersion: v2
     appVersion: v1.1.0
     created: "2023-11-30T11:47:32.390757-08:00"
@@ -260,4 +362,4 @@ entries:
     urls:
     - https://github.com/karmada-io/karmada/releases/download/v1.8.0/karmada-operator-chart-v1.8.0.tgz
     version: v1.8.0
-generated: "2024-02-29T16:55:17.961702-05:00"
+generated: "2024-09-21T15:01:05.710041947+08:00"
diff --git a/charts/karmada-operator/Chart.yaml b/charts/karmada-operator/Chart.yaml
index 2a055d3909fb..9c0f34596001 100644
--- a/charts/karmada-operator/Chart.yaml
+++ b/charts/karmada-operator/Chart.yaml
@@ -26,7 +26,7 @@ version: 0.0.1
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: latest
+appVersion: v1.1.0
 
 # This is karmada dependencies
 dependencies:
@@ -38,3 +38,7 @@ dependencies:
 maintainers:
   - name: calvin0327
     email: wen.chen@daocloud.io
+  - email: chaosi@zju.edu.cn
+    name: chaosi-zju
+  - email: amiralavi7@gmail.com
+    name: a7i
diff --git a/charts/karmada-operator/crds/operator.karmada.io_karmadas.yaml b/charts/karmada-operator/crds/operator.karmada.io_karmadas.yaml
index 9a04701a8dbe..b92125e8cdab 100644
--- a/charts/karmada-operator/crds/operator.karmada.io_karmadas.yaml
+++ b/charts/karmada-operator/crds/operator.karmada.io_karmadas.yaml
@@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
   annotations:
-    controller-gen.kubebuilder.io/version: v0.14.0
+    controller-gen.kubebuilder.io/version: v0.16.5
   name: karmadas.operator.karmada.io
 spec:
   group: operator.karmada.io
@@ -65,12 +65,14 @@ spec:
                       description: |-
                         CAData is an SSL Certificate Authority file used to secure etcd communication.
                         Required if using a TLS connection.
+                        Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
                       format: byte
                       type: string
                     certData:
                       description: |-
                         CertData is an SSL certification file used to secure etcd communication.
                         Required if using a TLS connection.
+                        Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
                       format: byte
                       type: string
                     endpoints:
@@ -82,13 +84,29 @@ spec:
                       description: |-
                         KeyData is an SSL key file used to secure etcd communication.
                         Required if using a TLS connection.
+                        Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials.
                       format: byte
                       type: string
+                    secretRef:
+                      description: |-
+                        SecretRef references a Kubernetes secret containing the etcd connection credentials.
+                        The secret must contain the following data keys:
+                        ca.crt: The Certificate Authority (CA) certificate data.
+                        tls.crt: The TLS certificate data used for verifying the etcd server's certificate.
+                        tls.key: The TLS private key.
+                        Required to configure the connection to an external etcd cluster.
+                      properties:
+                        name:
+                          description: Name is the name of resource being referenced.
+                          type: string
+                        namespace:
+                          description: Namespace is the namespace for the resource
+                            being referenced.
+                          type: string
+                      type: object
                   required:
-                  - caData
-                  - certData
                   - endpoints
-                  - keyData
+                  - secretRef
                   type: object
                 local:
                   description: |-
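The new `secretRef` field above replaces the inline `caData`/`certData`/`keyData` for external etcd. Below is a hedged sketch of how a Karmada instance might reference those credentials; the `spec.components.etcd.external` path and the API version are assumptions about the operator's CR layout, while the secret data keys are exactly the ones listed in the description.

```yaml
# Sketch only: field paths are assumed; the ca.crt/tls.crt/tls.key keys are
# the ones required by the secretRef description above.
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
spec:
  components:
    etcd:
      external:
        endpoints:
          - https://etcd.example.com:2379
        secretRef:
          name: external-etcd-credentials
          namespace: karmada-system
---
apiVersion: v1
kind: Secret
metadata:
  name: external-etcd-credentials
  namespace: karmada-system
data:
  ca.crt: "<base64 CA certificate>"
  tls.crt: "<base64 client certificate>"
  tls.key: "<base64 client private key>"
```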
@@ -151,11 +169,9 @@ spec:
                         Claims lists the names of resources, defined in spec.resourceClaims,
                         that are used by this container.
-                        This is an alpha field and requires enabling the DynamicResourceAllocation feature gate.
-                        This field is immutable. It can only be set for containers.
                       items:
                         description: ResourceClaim references one entry
@@ -243,9 +259,6 @@ spec:
                         used for system agents or other privileged things that are allowed
                         to see the host machine. Most containers will NOT need this.
                         More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
-                        ---
-                        TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
-                        mount host directories as read/write.
                       properties:
                         path:
                           description: |-
@@ -532,18 +545,1842 @@ spec:
                         override. A key in this map is the flag name as it appears on the
                         command line except without leading dash(es).
-                        Note: This is a temporary solution to allow for the configuration of the kube-apiserver component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field may lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration.
-                        For supported flags, please see https://kubernetes.io/docs/reference/command-line-tools-reference/kube-apiserver/ for details.
                       type: object
+                    extraVolumeMounts:
+                      description: |-
+                        ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container
+                        To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
+                        the operator will automatically mount volumes into the API server container needed to configure things such as TLS,
+                        SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
+                        there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction
+                        with ExtraArgs and ExtraVolumes can be used to fulfil those use cases.
+                      items:
+                        description: VolumeMount describes a mounting of a Volume within a container.
+                        properties:
+                          mountPath:
+                            description: |-
+                              Path within the container at which the volume should be mounted. Must
+                              not contain ':'.
+                            type: string
+                          mountPropagation:
+                            description: |-
+                              mountPropagation determines how mounts are propagated from the host
+                              to container and the other way around.
+                              When not set, MountPropagationNone is used.
+                              This field is beta in 1.10.
+                              When RecursiveReadOnly is set to IfPossible or to Enabled, MountPropagation must be None or unspecified
+                              (which defaults to None).
+                            type: string
+                          name:
+                            description: This must match the Name of a Volume.
+                            type: string
+                          readOnly:
+                            description: |-
+                              Mounted read-only if true, read-write otherwise (false or unspecified).
+                              Defaults to false.
+                            type: boolean
+                          recursiveReadOnly:
+                            description: |-
+                              RecursiveReadOnly specifies whether read-only mounts should be handled
+                              recursively.
+
+                              If ReadOnly is false, this field has no meaning and must be unspecified.
+
+                              If ReadOnly is true, and this field is set to Disabled, the mount is not made
+                              recursively read-only. If this field is set to IfPossible, the mount is made
+                              recursively read-only, if it is supported by the container runtime. If this
+                              field is set to Enabled, the mount is made recursively read-only if it is
+                              supported by the container runtime, otherwise the pod will not be started and
+                              an error will be generated to indicate the reason.
+
+                              If this field is set to IfPossible or Enabled, MountPropagation must be set to
+                              None (or be unspecified, which defaults to None).
+
+                              If this field is not specified, it is treated as an equivalent of Disabled.
+                            type: string
+                          subPath:
+                            description: |-
+                              Path within the volume from which the container's volume should be mounted.
+                              Defaults to "" (volume's root).
+                            type: string
+                          subPathExpr:
+                            description: |-
+                              Expanded path within the volume from which the container's volume should be mounted.
+                              Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment.
+                              Defaults to "" (volume's root).
+                              SubPathExpr and SubPath are mutually exclusive.
+                            type: string
+                        required:
+                        - mountPath
+                        - name
+                        type: object
+                      type: array
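The description above names encryption at rest as a motivating use case for combining ExtraArgs, ExtraVolumes and ExtraVolumeMounts (the extraVolumes schema follows below). Here is a hedged sketch of that combination on a Karmada resource; the `karmadaAPIServer` component path and all names are assumptions, not part of this diff.

```yaml
# Sketch only: illustrates ExtraArgs + ExtraVolumes + ExtraVolumeMounts for
# encryption at rest. The karmadaAPIServer path and secret name are assumed.
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
spec:
  components:
    karmadaAPIServer:
      extraArgs:
        encryption-provider-config: /etc/kubernetes/encryption/encryption-config.yaml
      extraVolumes:
        - name: encryption-config
          secret:
            secretName: karmada-encryption-config   # hypothetical Secret
      extraVolumeMounts:
        - name: encryption-config
          mountPath: /etc/kubernetes/encryption
          readOnly: true
```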
+                    extraVolumes:
+                      description: |-
+                        ExtraVolumes specifies a list of extra volumes for the API server's pod
+                        To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance,
+                        the operator will automatically attach volumes for the API server pod needed to configure things such as TLS,
+                        SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability,
+                        there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction
+                        with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases.
+                      items:
+                        description: Volume represents a named volume in a pod that may be accessed by any container in the pod.
+                        properties:
+                          awsElasticBlockStore:
+                            description: |-
+                              awsElasticBlockStore represents an AWS Disk resource that is attached to a
+                              kubelet's host machine and then exposed to the pod.
+                              More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+                            properties:
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type of the volume that you want to mount.
+                                  Tip: Ensure that the filesystem type is supported by the host operating system.
+                                  Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+                                type: string
+                              partition:
+                                description: |-
+                                  partition is the partition in the volume that you want to mount.
+                                  If omitted, the default is to mount by volume name.
+                                  Examples: For volume /dev/sda1, you specify the partition as "1".
+                                  Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+                                format: int32
+                                type: integer
+                              readOnly:
+                                description: |-
+                                  readOnly value true will force the readOnly setting in VolumeMounts.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+                                type: boolean
+                              volumeID:
+                                description: |-
+                                  volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume).
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore
+                                type: string
+                            required:
+                            - volumeID
+                            type: object
+                          azureDisk:
+                            description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.
+                            properties:
+                              cachingMode:
+                                description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.'
+                                type: string
+                              diskName:
+                                description: diskName is the Name of the data disk in the blob storage
+                                type: string
+                              diskURI:
+                                description: diskURI is the URI of data disk in the blob storage
+                                type: string
+                              fsType:
+                                description: |-
+                                  fsType is Filesystem type to mount.
+                                  Must be a filesystem type supported by the host operating system.
+                                  Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                type: string
+                              kind:
+                                description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared'
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly Defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                type: boolean
+                            required:
+                            - diskName
+                            - diskURI
+                            type: object
+                          azureFile:
+                            description: azureFile represents an Azure File Service mount on the host and bind mount to the pod.
+                            properties:
+                              readOnly:
+                                description: |-
+                                  readOnly defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                type: boolean
+                              secretName:
+                                description: secretName is the name of secret that contains Azure Storage Account Name and Key
+                                type: string
+                              shareName:
+                                description: shareName is the azure share Name
+                                type: string
+                            required:
+                            - secretName
+                            - shareName
+                            type: object
+                          cephfs:
+                            description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime
+                            properties:
+                              monitors:
+                                description: |-
+                                  monitors is Required: Monitors is a collection of Ceph monitors
+                                  More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              path:
+                                description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /'
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                  More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+                                type: boolean
+                              secretFile:
+                                description: |-
+                                  secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+                                  More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+                                type: string
+                              secretRef:
+                                description: |-
+                                  secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty.
+                                  More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+                                properties:
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              user:
+                                description: |-
+                                  user is optional: User is the rados user name, default is admin
+                                  More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+                                type: string
+                            required:
+                            - monitors
+                            type: object
+                          cinder:
+                            description: |-
+                              cinder represents a cinder volume attached and mounted on kubelets host machine.
+                              More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+                            properties:
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type to mount.
+                                  Must be a filesystem type supported by the host operating system.
+                                  Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                  More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                  More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+                                type: boolean
+                              secretRef:
+                                description: |-
+                                  secretRef is optional: points to a secret object containing parameters used to connect
+                                  to OpenStack.
+                                properties:
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              volumeID:
+                                description: |-
+                                  volumeID used to identify the volume in cinder.
+                                  More info: https://examples.k8s.io/mysql-cinder-pd/README.md
+                                type: string
+                            required:
+                            - volumeID
+                            type: object
+                          configMap:
+                            description: configMap represents a configMap that should populate this volume
+                            properties:
+                              defaultMode:
+                                description: |-
+                                  defaultMode is optional: mode bits used to set permissions on created files by default.
+                                  Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                                  YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                  Defaults to 0644.
+                                  Directories within the path are not affected by this setting.
+                                  This might be in conflict with other options that affect the file
+                                  mode, like fsGroup, and the result can be other mode bits set.
+                                format: int32
+                                type: integer
+                              items:
+                                description: |-
+                                  items if unspecified, each key-value pair in the Data field of the referenced
+                                  ConfigMap will be projected into the volume as a file whose name is the
+                                  key and content is the value. If specified, the listed keys will be
+                                  projected into the specified paths, and unlisted keys will not be
+                                  present. If a key is specified which is not present in the ConfigMap,
+                                  the volume setup will error unless it is marked optional. Paths must be
+                                  relative and may not contain the '..' path or start with '..'.
+                                items:
+                                  description: Maps a string key to a path within a volume.
+                                  properties:
+                                    key:
+                                      description: key is the key to project.
+                                      type: string
+                                    mode:
+                                      description: |-
+                                        mode is Optional: mode bits used to set permissions on this file.
+                                        Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                                        YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                        If not specified, the volume defaultMode will be used.
+                                        This might be in conflict with other options that affect the file
+                                        mode, like fsGroup, and the result can be other mode bits set.
+                                      format: int32
+                                      type: integer
+                                    path:
+                                      description: |-
+                                        path is the relative path of the file to map the key to.
+                                        May not be an absolute path.
+                                        May not contain the path element '..'.
+                                        May not start with the string '..'.
+                                      type: string
+                                  required:
+                                  - key
+                                  - path
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              name:
+                                default: ""
+                                description: |-
+                                  Name of the referent.
+                                  This field is effectively required, but due to backwards compatibility is
+                                  allowed to be empty. Instances of this type with an empty value here are
+                                  almost certainly wrong.
+                                  More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                type: string
+                              optional:
+                                description: optional specify whether the ConfigMap or its keys must be defined
+                                type: boolean
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          csi:
+                            description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
+                            properties:
+                              driver:
+                                description: |-
+                                  driver is the name of the CSI driver that handles this volume.
+                                  Consult with your admin for the correct name as registered in the cluster.
+                                type: string
+                              fsType:
+                                description: |-
+                                  fsType to mount. Ex. "ext4", "xfs", "ntfs".
+                                  If not provided, the empty value is passed to the associated CSI driver
+                                  which will determine the default filesystem to apply.
+                                type: string
+                              nodePublishSecretRef:
+                                description: |-
+                                  nodePublishSecretRef is a reference to the secret object containing
+                                  sensitive information to pass to the CSI driver to complete the CSI
+                                  NodePublishVolume and NodeUnpublishVolume calls.
+                                  This field is optional, and may be empty if no secret is required. If the
+                                  secret object contains more than one secret, all secret references are passed.
+                                properties:
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              readOnly:
+                                description: |-
+                                  readOnly specifies a read-only configuration for the volume.
+                                  Defaults to false (read/write).
+                                type: boolean
+                              volumeAttributes:
+                                additionalProperties:
+                                  type: string
+                                description: |-
+                                  volumeAttributes stores driver-specific properties that are passed to the CSI
+                                  driver. Consult your driver's documentation for supported values.
+                                type: object
+                            required:
+                            - driver
+                            type: object
+                          downwardAPI:
+                            description: downwardAPI represents downward API about the pod that should populate this volume
+                            properties:
+                              defaultMode:
+                                description: |-
+                                  Optional: mode bits used to set permissions on created files by default.
+                                  Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                                  YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                  Defaults to 0644.
+                                  Directories within the path are not affected by this setting.
+                                  This might be in conflict with other options that affect the file
+                                  mode, like fsGroup, and the result can be other mode bits set.
+                                format: int32
+                                type: integer
+                              items:
+                                description: Items is a list of downward API volume file
+                                items:
+                                  description: DownwardAPIVolumeFile represents information to create the file containing the pod field
+                                  properties:
+                                    fieldRef:
+                                      description: 'Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported.'
+                                      properties:
+                                        apiVersion:
+                                          description: Version of the schema the FieldPath is written in terms of, defaults to "v1".
+                                          type: string
+                                        fieldPath:
+                                          description: Path of the field to select in the specified API version.
+                                          type: string
+                                      required:
+                                      - fieldPath
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                    mode:
+                                      description: |-
+                                        Optional: mode bits used to set permissions on this file, must be an octal value
+                                        between 0000 and 0777 or a decimal value between 0 and 511.
+                                        YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                        If not specified, the volume defaultMode will be used.
+                                        This might be in conflict with other options that affect the file
+                                        mode, like fsGroup, and the result can be other mode bits set.
+                                      format: int32
+                                      type: integer
+                                    path:
+                                      description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..'''
+                                      type: string
+                                    resourceFieldRef:
+                                      description: |-
+                                        Selects a resource of the container: only resources limits and requests
+                                        (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.
+                                      properties:
+                                        containerName:
+                                          description: 'Container name: required for volumes, optional for env vars'
+                                          type: string
+                                        divisor:
+                                          anyOf:
+                                          - type: integer
+                                          - type: string
+                                          description: Specifies the output format of the exposed resources, defaults to "1"
+                                          pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                          x-kubernetes-int-or-string: true
+                                        resource:
+                                          description: 'Required: resource to select'
+                                          type: string
+                                      required:
+                                      - resource
+                                      type: object
+                                      x-kubernetes-map-type: atomic
+                                  required:
+                                  - path
+                                  type: object
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            type: object
+                          emptyDir:
+                            description: |-
+                              emptyDir represents a temporary directory that shares a pod's lifetime.
+                              More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+                            properties:
+                              medium:
+                                description: |-
+                                  medium represents what type of storage medium should back this directory.
+                                  The default is "" which means to use the node's default medium.
+                                  Must be an empty string (default) or Memory.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+                                type: string
+                              sizeLimit:
+                                anyOf:
+                                - type: integer
+                                - type: string
+                                description: |-
+                                  sizeLimit is the total amount of local storage required for this EmptyDir volume.
+                                  The size limit is also applicable for memory medium.
+                                  The maximum usage on memory medium EmptyDir would be the minimum value between
+                                  the SizeLimit specified here and the sum of memory limits of all containers in a pod.
+                                  The default is nil which means that the limit is undefined.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir
+                                pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                x-kubernetes-int-or-string: true
+                            type: object
+                          ephemeral:
+                            description: |-
+                              ephemeral represents a volume that is handled by a cluster storage driver.
+                              The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
+                              and deleted when the pod is removed.
+
+                              Use this if:
+                              a) the volume is only needed while the pod runs,
+                              b) features of normal volumes like restoring from snapshot or capacity
+                                 tracking are needed,
+                              c) the storage driver is specified through a storage class, and
+                              d) the storage driver supports dynamic volume provisioning through
+                                 a PersistentVolumeClaim (see EphemeralVolumeSource for more
+                                 information on the connection between this volume type
+                                 and PersistentVolumeClaim).
+
+                              Use PersistentVolumeClaim or one of the vendor-specific
+                              APIs for volumes that persist for longer than the lifecycle
+                              of an individual pod.
+
+                              Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
+                              be used that way - see the documentation of the driver for
+                              more information.
+
+                              A pod can use both types of ephemeral volumes and
+                              persistent volumes at the same time.
+                            properties:
+                              volumeClaimTemplate:
+                                description: |-
+                                  Will be used to create a stand-alone PVC to provision the volume.
+                                  The pod in which this EphemeralVolumeSource is embedded will be the
+                                  owner of the PVC, i.e. the PVC will be deleted together with the
+                                  pod. The name of the PVC will be `<pod name>-<volume name>` where
+                                  `<volume name>` is the name from the `PodSpec.Volumes` array
+                                  entry. Pod validation will reject the pod if the concatenated name
+                                  is not valid for a PVC (for example, too long).
+
+                                  An existing PVC with that name that is not owned by the pod
+                                  will *not* be used for the pod to avoid using an unrelated
+                                  volume by mistake. Starting the pod is then blocked until
+                                  the unrelated PVC is removed. If such a pre-created PVC is
+                                  meant to be used by the pod, the PVC has to updated with an
+                                  owner reference to the pod once the pod exists. Normally
+                                  this should not be necessary, but it may be useful when
+                                  manually reconstructing a broken cluster.
+
+                                  This field is read-only and no changes will be made by Kubernetes
+                                  to the PVC after it has been created.
+
+                                  Required, must not be nil.
+                                properties:
+                                  metadata:
+                                    description: |-
+                                      May contain labels and annotations that will be copied into the PVC
+                                      when creating it. No other fields are allowed and will be rejected during
+                                      validation.
+                                    properties:
+                                      annotations:
+                                        additionalProperties:
+                                          type: string
+                                        type: object
+                                      finalizers:
+                                        items:
+                                          type: string
+                                        type: array
+                                      labels:
+                                        additionalProperties:
+                                          type: string
+                                        type: object
+                                      name:
+                                        type: string
+                                      namespace:
+                                        type: string
+                                    type: object
+                                  spec:
+                                    description: |-
+                                      The specification for the PersistentVolumeClaim. The entire content is
+                                      copied unchanged into the PVC that gets created from this
+                                      template. The same fields as in a PersistentVolumeClaim
+                                      are also valid here.
+                                    properties:
+                                      accessModes:
+                                        description: |-
+                                          accessModes contains the desired access modes the volume should have.
+                                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+                                        items:
+                                          type: string
+                                        type: array
+                                        x-kubernetes-list-type: atomic
+                                      dataSource:
+                                        description: |-
+                                          dataSource field can be used to specify either:
+                                          * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                                          * An existing PVC (PersistentVolumeClaim)
+                                          If the provisioner or an external controller can support the specified data source,
+                                          it will create a new volume based on the contents of the specified data source.
+                                          When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef,
+                                          and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified.
+                                          If the namespace is specified, then dataSourceRef will not be copied to dataSource.
+                                        properties:
+                                          apiGroup:
+                                            description: |-
+                                              APIGroup is the group for the resource being referenced.
+                                              If APIGroup is not specified, the specified Kind must be in the core API group.
+                                              For any other third-party types, APIGroup is required.
+                                            type: string
+                                          kind:
+                                            description: Kind is the type of resource being referenced
+                                            type: string
+                                          name:
+                                            description: Name is the name of resource being referenced
+                                            type: string
+                                        required:
+                                        - kind
+                                        - name
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      dataSourceRef:
+                                        description: |-
+                                          dataSourceRef specifies the object from which to populate the volume with data, if a non-empty
+                                          volume is desired. This may be any object from a non-empty API group (non
+                                          core object) or a PersistentVolumeClaim object.
+                                          When this field is specified, volume binding will only succeed if the type of
+                                          the specified object matches some installed volume populator or dynamic
+                                          provisioner.
+                                          This field will replace the functionality of the dataSource field and as such
+                                          if both fields are non-empty, they must have the same value. For backwards
+                                          compatibility, when namespace isn't specified in dataSourceRef,
+                                          both fields (dataSource and dataSourceRef) will be set to the same
+                                          value automatically if one of them is empty and the other is non-empty.
+                                          When namespace is specified in dataSourceRef,
+                                          dataSource isn't set to the same value and must be empty.
+                                          There are three important differences between dataSource and dataSourceRef:
+                                          * While dataSource only allows two specific types of objects, dataSourceRef
+                                            allows any non-core object, as well as PersistentVolumeClaim objects.
+                                          * While dataSource ignores disallowed values (dropping them), dataSourceRef
+                                            preserves all values, and generates an error if a disallowed value is
+                                            specified.
+                                          * While dataSource only allows local objects, dataSourceRef allows objects
+                                            in any namespaces.
+                                          (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled.
+                                          (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+                                        properties:
+                                          apiGroup:
+                                            description: |-
+                                              APIGroup is the group for the resource being referenced.
+                                              If APIGroup is not specified, the specified Kind must be in the core API group.
+                                              For any other third-party types, APIGroup is required.
+                                            type: string
+                                          kind:
+                                            description: Kind is the type of resource being referenced
+                                            type: string
+                                          name:
+                                            description: Name is the name of resource being referenced
+                                            type: string
+                                          namespace:
+                                            description: |-
+                                              Namespace is the namespace of resource being referenced
+                                              Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details.
+                                              (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled.
+                                            type: string
+                                        required:
+                                        - kind
+                                        - name
+                                        type: object
+                                      resources:
+                                        description: |-
+                                          resources represents the minimum resources the volume should have.
+                                          If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements
+                                          that are lower than previous value but must still be higher than capacity recorded in the
+                                          status field of the claim.
+                                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources
+                                        properties:
+                                          limits:
+                                            additionalProperties:
+                                              anyOf:
+                                              - type: integer
+                                              - type: string
+                                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                              x-kubernetes-int-or-string: true
+                                            description: |-
+                                              Limits describes the maximum amount of compute resources allowed.
+                                              More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                                            type: object
+                                          requests:
+                                            additionalProperties:
+                                              anyOf:
+                                              - type: integer
+                                              - type: string
+                                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                              x-kubernetes-int-or-string: true
+                                            description: |-
+                                              Requests describes the minimum amount of compute resources required.
+                                              If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+                                              otherwise to an implementation-defined value. Requests cannot exceed Limits.
+                                              More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+                                            type: object
+                                        type: object
+                                      selector:
+                                        description: selector is a label query over volumes to consider for binding.
+                                        properties:
+                                          matchExpressions:
+                                            description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                            items:
+                                              description: |-
+                                                A label selector requirement is a selector that contains values, a key, and an operator that
+                                                relates the key and values.
+                                              properties:
+                                                key:
+                                                  description: key is the label key that the selector applies to.
+                                                  type: string
+                                                operator:
+                                                  description: |-
+                                                    operator represents a key's relationship to a set of values.
+                                                    Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                  type: string
+                                                values:
+                                                  description: |-
+                                                    values is an array of string values. If the operator is In or NotIn,
+                                                    the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                    the values array must be empty. This array is replaced during a strategic
+                                                    merge patch.
+                                                  items:
+                                                    type: string
+                                                  type: array
+                                                  x-kubernetes-list-type: atomic
+                                              required:
+                                              - key
+                                              - operator
+                                              type: object
+                                            type: array
+                                            x-kubernetes-list-type: atomic
+                                          matchLabels:
+                                            additionalProperties:
+                                              type: string
+                                            description: |-
+                                              matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                              map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                              operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                            type: object
+                                        type: object
+                                        x-kubernetes-map-type: atomic
+                                      storageClassName:
+                                        description: |-
+                                          storageClassName is the name of the StorageClass required by the claim.
+                                          More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+                                        type: string
+                                      volumeAttributesClassName:
+                                        description: |-
+                                          volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim.
+                                          If specified, the CSI driver will create or update the volume with the attributes defined
+                                          in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName,
+                                          it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass
+                                          will be applied to the claim but it's not allowed to reset this field to empty string once it is set.
+                                          If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass
+                                          will be set by the persistentvolume controller if it exists.
+                                          If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be
+                                          set to a Pending state, as reflected by the modifyVolumeStatus field, until such a resource
+                                          exists.
+                                          More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/
+                                          (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled.
+                                        type: string
+                                      volumeMode:
+                                        description: |-
+                                          volumeMode defines what type of volume is required by the claim.
+                                          Value of Filesystem is implied when not included in claim spec.
+                                        type: string
+                                      volumeName:
+                                        description: volumeName is the binding reference to the PersistentVolume backing this claim.
+                                        type: string
+                                    type: object
+                                required:
+                                - spec
+                                type: object
+                            type: object
+                          fc:
+                            description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
+                            properties:
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type to mount.
+                                  Must be a filesystem type supported by the host operating system.
+                                  Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                type: string
+                              lun:
+                                description: 'lun is Optional: FC target lun number'
+                                format: int32
+                                type: integer
+                              readOnly:
+                                description: |-
+                                  readOnly is Optional: Defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                type: boolean
+                              targetWWNs:
+                                description: 'targetWWNs is Optional: FC target worldwide names (WWNs)'
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              wwids:
+                                description: |-
+                                  wwids Optional: FC volume world wide identifiers (wwids)
+                                  Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                            type: object
+                          flexVolume:
+                            description: |-
+                              flexVolume represents a generic volume resource that is
+                              provisioned/attached using an exec based plugin.
+                            properties:
+                              driver:
+                                description: driver is the name of the driver to use for this volume.
+                                type: string
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type to mount.
+                                  Must be a filesystem type supported by the host operating system.
+                                  Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+                                type: string
+                              options:
+                                additionalProperties:
+                                  type: string
+                                description: 'options is Optional: this field holds extra command options if any.'
+                                type: object
+                              readOnly:
+                                description: |-
+                                  readOnly is Optional: defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                type: boolean
+                              secretRef:
+                                description: |-
+                                  secretRef is Optional: secretRef is reference to the secret object containing
+                                  sensitive information to pass to the plugin scripts. This may be
+                                  empty if no secret object is specified. If the secret object
+                                  contains more than one secret, all secrets are passed to the plugin
+                                  scripts.
+                                properties:
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                type: object
+                                x-kubernetes-map-type: atomic
+                            required:
+                            - driver
+                            type: object
+                          flocker:
+                            description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running
+                            properties:
+                              datasetName:
+                                description: |-
+                                  datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker
+                                  should be considered as deprecated
+                                type: string
+                              datasetUUID:
+                                description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset
+                                type: string
+                            type: object
+                          gcePersistentDisk:
+                            description: |-
+                              gcePersistentDisk represents a GCE Disk resource that is attached to a
+                              kubelet's host machine and then exposed to the pod.
+                              More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                            properties:
+                              fsType:
+                                description: |-
+                                  fsType is filesystem type of the volume that you want to mount.
+                                  Tip: Ensure that the filesystem type is supported by the host operating system.
+                                  Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                                type: string
+                              partition:
+                                description: |-
+                                  partition is the partition in the volume that you want to mount.
+                                  If omitted, the default is to mount by volume name.
+                                  Examples: For volume /dev/sda1, you specify the partition as "1".
+                                  Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                                format: int32
+                                type: integer
+                              pdName:
+                                description: |-
+                                  pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly here will force the ReadOnly setting in VolumeMounts.
+                                  Defaults to false.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+                                type: boolean
+                            required:
+                            - pdName
+                            type: object
+                          gitRepo:
+                            description: |-
+                              gitRepo represents a git repository at a particular revision.
+                              DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
+                              EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
+                              into the Pod's container.
+                            properties:
+                              directory:
+                                description: |-
+                                  directory is the target directory name.
+                                  Must not contain or start with '..'. If '.' is supplied, the volume directory will be the
+                                  git repository. Otherwise, if specified, the volume will contain the git repository in
+                                  the subdirectory with the given name.
+                                type: string
+                              repository:
+                                description: repository is the URL
+                                type: string
+                              revision:
+                                description: revision is the commit hash for the specified revision.
+                                type: string
+                            required:
+                            - repository
+                            type: object
+                          glusterfs:
+                            description: |-
+                              glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime.
+                              More info: https://examples.k8s.io/volumes/glusterfs/README.md
+                            properties:
+                              endpoints:
+                                description: |-
+                                  endpoints is the endpoint name that details Glusterfs topology.
+                                  More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                                type: string
+                              path:
+                                description: |-
+                                  path is the Glusterfs volume path.
+                                  More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly here will force the Glusterfs volume to be mounted with read-only permissions.
+                                  Defaults to false.
+                                  More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod
+                                type: boolean
+                            required:
+                            - endpoints
+                            - path
+                            type: object
+                          hostPath:
+                            description: |-
+                              hostPath represents a pre-existing file or directory on the host
+                              machine that is directly exposed to the container. This is generally
+                              used for system agents or other privileged things that are allowed
+                              to see the host machine. Most containers will NOT need this.
+                              More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                            properties:
+                              path:
+                                description: |-
+                                  path of the directory on the host.
+                                  If the path is a symlink, it will follow the link to the real path.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                                type: string
+                              type:
+                                description: |-
+                                  type for HostPath Volume
+                                  Defaults to ""
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath
+                                type: string
+                            required:
+                            - path
+                            type: object
+                          iscsi:
+                            description: |-
+                              iscsi represents an ISCSI Disk resource that is attached to a
+                              kubelet's host machine and then exposed to the pod.
+                              More info: https://examples.k8s.io/volumes/iscsi/README.md
+                            properties:
+                              chapAuthDiscovery:
+                                description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication
+                                type: boolean
+                              chapAuthSession:
+                                description: chapAuthSession defines whether support iSCSI Session CHAP authentication
+                                type: boolean
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type of the volume that you want to mount.
+                                  Tip: Ensure that the filesystem type is supported by the host operating system.
+                                  Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+                                type: string
+                              initiatorName:
+                                description: |-
+                                  initiatorName is the custom iSCSI Initiator Name.
+                                  If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                                  <target portal>:<volume name> will be created for the connection.
+                                type: string
+                              iqn:
+                                description: iqn is the target iSCSI Qualified Name.
+                                type: string
+                              iscsiInterface:
+                                description: |-
+                                  iscsiInterface is the interface Name that uses an iSCSI transport.
+                                  Defaults to 'default' (tcp).
+                                type: string
+                              lun:
+                                description: lun represents iSCSI Target Lun number.
+                                format: int32
+                                type: integer
+                              portals:
+                                description: |-
+                                  portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+                                  is other than default (typically TCP ports 860 and 3260).
+                                items:
+                                  type: string
+                                type: array
+                                x-kubernetes-list-type: atomic
+                              readOnly:
+                                description: |-
+                                  readOnly here will force the ReadOnly setting in VolumeMounts.
+                                  Defaults to false.
+                                type: boolean
+                              secretRef:
+                                description: secretRef is the CHAP Secret for iSCSI target and initiator authentication
+                                properties:
+                                  name:
+                                    default: ""
+                                    description: |-
+                                      Name of the referent.
+                                      This field is effectively required, but due to backwards compatibility is
+                                      allowed to be empty. Instances of this type with an empty value here are
+                                      almost certainly wrong.
+                                      More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                    type: string
+                                type: object
+                                x-kubernetes-map-type: atomic
+                              targetPortal:
+                                description: |-
+                                  targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+                                  is other than default (typically TCP ports 860 and 3260).
+                                type: string
+                            required:
+                            - iqn
+                            - lun
+                            - targetPortal
+                            type: object
+                          name:
+                            description: |-
+                              name of the volume.
+                              Must be a DNS_LABEL and unique within the pod.
+                              More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                            type: string
+                          nfs:
+                            description: |-
+                              nfs represents an NFS mount on the host that shares a pod's lifetime
+                              More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+                            properties:
+                              path:
+                                description: |-
+                                  path that is exported by the NFS server.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly here will force the NFS export to be mounted with read-only permissions.
+                                  Defaults to false.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+                                type: boolean
+                              server:
+                                description: |-
+                                  server is the hostname or IP address of the NFS server.
+                                  More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+                                type: string
+                            required:
+                            - path
+                            - server
+                            type: object
+                          persistentVolumeClaim:
+                            description: |-
+                              persistentVolumeClaimVolumeSource represents a reference to a
+                              PersistentVolumeClaim in the same namespace.
+                              More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+                            properties:
+                              claimName:
+                                description: |-
+                                  claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+                                  More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly Will force the ReadOnly setting in VolumeMounts.
+                                  Default false.
+                                type: boolean
+                            required:
+                            - claimName
+                            type: object
+                          photonPersistentDisk:
+                            description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+                            properties:
+                              fsType:
+                                description: |-
+                                  fsType is the filesystem type to mount.
+                                  Must be a filesystem type supported by the host operating system.
+                                  Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+                                type: string
+                              pdID:
+                                description: pdID is the ID that identifies Photon Controller persistent disk
+                                type: string
+                            required:
+                            - pdID
+                            type: object
+                          portworxVolume:
+                            description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine
+                            properties:
+                              fsType:
+                                description: |-
+                                  fSType represents the filesystem type to mount
+                                  Must be a filesystem type supported by the host operating system.
+                                  Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified.
+                                type: string
+                              readOnly:
+                                description: |-
+                                  readOnly defaults to false (read/write). ReadOnly here will force
+                                  the ReadOnly setting in VolumeMounts.
+                                type: boolean
+                              volumeID:
+                                description: volumeID uniquely identifies a Portworx volume
+                                type: string
+                            required:
+                            - volumeID
+                            type: object
+                          projected:
+                            description: projected items for all in one resources secrets, configmaps, and downward API
+                            properties:
+                              defaultMode:
+                                description: |-
+                                  defaultMode are the mode bits used to set permissions on created files by default.
+                                  Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511.
+                                  YAML accepts both octal and decimal values, JSON requires decimal values for mode bits.
+                                  Directories within the path are not affected by this setting.
+                                  This might be in conflict with other options that affect the file
+                                  mode, like fsGroup, and the result can be other mode bits set.
+                                format: int32
+                                type: integer
+                              sources:
+                                description: sources is the list of volume projections
+                                items:
+                                  description: Projection that may be projected along with other supported volume types
+                                  properties:
+                                    clusterTrustBundle:
+                                      description: |-
+                                        ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field
+                                        of ClusterTrustBundle objects in an auto-updating file.
+
+                                        Alpha, gated by the ClusterTrustBundleProjection feature gate.
+
+                                        ClusterTrustBundle objects can either be selected by name, or by the
+                                        combination of signer name and a label selector.
+
+                                        Kubelet performs aggressive normalization of the PEM contents written
+                                        into the pod filesystem. Esoteric PEM features such as inter-block
+                                        comments and block headers are stripped. Certificates are deduplicated.
+                                        The ordering of certificates within the file is arbitrary, and Kubelet
+                                        may change the order over time.
+                                      properties:
+                                        labelSelector:
+                                          description: |-
+                                            Select all ClusterTrustBundles that match this label selector. Only has
+                                            effect if signerName is set. Mutually-exclusive with name. If unset,
+                                            interpreted as "match nothing". If set but empty, interpreted as "match
+                                            everything".
+                                          properties:
+                                            matchExpressions:
+                                              description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                              items:
+                                                description: |-
+                                                  A label selector requirement is a selector that contains values, a key, and an operator that
+                                                  relates the key and values.
+                                                properties:
+                                                  key:
+                                                    description: key is the label key that the selector applies to.
+                                                    type: string
+                                                  operator:
+                                                    description: |-
+                                                      operator represents a key's relationship to a set of values.
+                                                      Valid operators are In, NotIn, Exists and DoesNotExist.
+                                                    type: string
+                                                  values:
+                                                    description: |-
+                                                      values is an array of string values. If the operator is In or NotIn,
+                                                      the values array must be non-empty. If the operator is Exists or DoesNotExist,
+                                                      the values array must be empty. This array is replaced during a strategic
+                                                      merge patch.
+                                                    items:
+                                                      type: string
+                                                    type: array
+                                                    x-kubernetes-list-type: atomic
+                                                required:
+                                                - key
+                                                - operator
+                                                type: object
+                                              type: array
+                                              x-kubernetes-list-type: atomic
+                                            matchLabels:
+                                              additionalProperties:
+                                                type: string
+                                              description: |-
+                                                matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels
+                                                map is equivalent to an element of matchExpressions, whose key field is "key", the
+                                                operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                              type: object
+                                          type: object
+                                          x-kubernetes-map-type: atomic
+                                        name:
+                                          description: |-
+                                            Select a single ClusterTrustBundle by object name. Mutually-exclusive
+                                            with signerName and labelSelector.
+                                          type: string
+                                        optional:
+                                          description: |-
+                                            If true, don't block pod startup if the referenced ClusterTrustBundle(s)
+                                            aren't available. If using name, then the named ClusterTrustBundle is
+                                            allowed not to exist. If using signerName, then the combination of
+                                            signerName and labelSelector is allowed to match zero
+                                            ClusterTrustBundles.
+                                          type: boolean
+                                        path:
+                                          description: Relative path from the volume root to write the bundle.
+                                          type: string
+                                        signerName:
+                                          description: |-
+                                            Select all ClusterTrustBundles that match this signer name.
+                                            Mutually-exclusive with name. The contents of all selected
+                                            ClusterTrustBundles will be unified and deduplicated.
+                                          type: string
+                                      required:
+                                      - path
+                                      type: object
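To make the clusterTrustBundle projection above concrete, here is a hedged illustration of a pod volume that selects bundles by signer name plus label selector and writes the merged PEM to one file; all names are hypothetical, and the feature is alpha, gated by ClusterTrustBundleProjection.

```yaml
# Sketch only: hypothetical signer and labels; requires the alpha
# ClusterTrustBundleProjection feature gate.
volumes:
  - name: trust-store
    projected:
      sources:
        - clusterTrustBundle:
            signerName: example.com/internal-ca
            labelSelector:
              matchLabels:
                trust.example.com/scope: internal
            path: ca-bundle.pem
```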
+ type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. 
The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. 
+ type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours. Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serviceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil.
+ More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. 
+ format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. 
+ type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array featureGates: additionalProperties: type: boolean @@ -592,11 +2429,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -651,9 +2486,15 @@ spec: Defaults to "10.96.0.0/12". type: string serviceType: - description: |- - ServiceType represents the service type of karmada apiserver. - it is ClusterIP by default. + default: ClusterIP + description: |- + ServiceType represents the service type of Karmada API server. + Valid options are: "ClusterIP", "NodePort", "LoadBalancer". + Defaults to "ClusterIP". + enum: + - ClusterIP + - NodePort + - LoadBalancer type: string type: object karmadaAggregatedAPIServer: @@ -683,14 +2524,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-aggregated-apiserver component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-aggregated-apiserver for details. @@ -744,11 +2583,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -811,7 +2648,6 @@ spec: 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. - All controllers: binding, cluster, clusterStatus, endpointSlice, execution, federatedResourceQuotaStatus, federatedResourceQuotaSync, hpa, namespace, serviceExport, serviceImport, unifiedAuth, workStatus. @@ -830,14 +2666,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-controller-manager component. In the future, we will provide a more structured way to configure the component.
Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-controller-manager for details. @@ -894,11 +2728,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -963,14 +2795,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-descheduler component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-descheduler for details. @@ -1016,11 +2846,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1085,14 +2913,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-metrics-adapter component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-metrics-adapter for details. @@ -1138,11 +2964,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1207,14 +3031,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-scheduler component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-scheduler for details.
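(Aside: to make the `extraArgs` contract repeated in the schema above concrete, here is a minimal sketch of how a flag override is expected to look on a `Karmada` custom resource. The `spec.components.<component>.extraArgs` field paths are assumed from the schema in this diff; the flag names and values are illustrative only, not a vetted configuration.)

```yaml
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
  namespace: test
spec:
  components:
    karmadaScheduler:
      extraArgs:
        # Keys are flag names without the leading dash(es),
        # e.g. "--leader-elect-resource-namespace=test" becomes:
        leader-elect-resource-namespace: "test"
        # klog verbosity; value is a string even for numeric flags
        v: "4"
```

(As the descriptions warn, these values are passed straight through to the component's command line, so an unsupported flag can leave the component unhealthy.)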
@@ -1268,11 +3090,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1337,14 +3157,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-search component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-search for details. @@ -1390,11 +3208,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1459,14 +3275,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-webhook component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-webhook for details. @@ -1512,11 +3326,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1579,7 +3391,6 @@ spec: 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. - All controllers: attachdetach, bootstrapsigner, cloud-node-lifecycle, clusterrole-aggregation, cronjob, csrapproving, csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, endpointslice, @@ -1594,7 +3405,6 @@ spec: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/ for details. - However, Karmada uses Kubernetes Native API definitions for federated resource template, so it doesn't need to enable some resource-related controllers like daemonset, deployment etc. On the other hand, Karmada leverages the capabilities of the Kubernetes controller to @@ -1603,13 +3413,11 @@ spec: and the `garbagecollector` controller handles automatic clean-up of redundant items in your karmada. - According to the user feedback and karmada requirements, the following controllers are enabled by default: namespace, garbagecollector, serviceaccount-token, ttl-after-finished, bootstrapsigner, csrapproving, csrcleaner, csrsigning.
See https://karmada.io/docs/administrator/configuration/configure-controllers#kubernetes-controllers - Others are disabled by default. If you want to enable or disable other controllers, you have to explicitly specify all the controllers that kube-controller-manager should enable at the startup phase. @@ -1624,14 +3432,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the kube-controller-manager component. In the future, we will provide a more structured way to configure the component. Once that is done, the use of this field will be discouraged. Incorrect settings on this field may lead to the corresponding component being in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/ for details. @@ -1684,11 +3490,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1732,6 +3536,8 @@ spec: type: object type: object type: object + required: + - etcd type: object crdTarball: description: |- @@ -1830,20 +3636,25 @@ spec: status: description: Most recently observed status of the Karmada. properties: + apiServerService: + description: |- + APIServerService reports the location of the Karmada API server service which + can be used by third-party applications to discover the Karmada Service, e.g. + to expose the service outside the cluster via Ingress. + properties: + name: + description: Name represents the name of the Karmada API Server + service. + type: string + required: + - name + type: object conditions: description: Conditions represents the latest available observations of a karmada's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1884,12 +3695,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase.
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/charts/karmada-operator/templates/karmada-operator-clusterrole.yaml b/charts/karmada-operator/templates/karmada-operator-clusterrole.yaml index 690592157347..8ea365a641b6 100644 --- a/charts/karmada-operator/templates/karmada-operator-clusterrole.yaml +++ b/charts/karmada-operator/templates/karmada-operator-clusterrole.yaml @@ -4,8 +4,29 @@ metadata: name: {{ include "common.names.fullname" . }} labels: {{- include "common.labels.standard" . | nindent 4 }} rules: - - apiGroups: ['*'] - resources: ['*'] - verbs: ["*"] - - nonResourceURLs: ['*'] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] # karmada-operator requires access to the Lease resource for leader election + verbs: ["get", "create", "update"] + - apiGroups: ["operator.karmada.io"] + resources: ["karmadas"] # to manage karmada instances + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["operator.karmada.io"] + resources: ["karmadas/status"] # to update the status subresource of karmada instances + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] # allows karmada-operator to record events in the kubernetes api-server + verbs: ["create"] + - apiGroups: [""] + resources: ["nodes", "pods"] # list cluster nodes and pods to get node information and for health checks + verbs: ["list"] + - apiGroups: [""] + resources: ["namespaces"] # to get information about namespaces, and deploy resources into specific namespaces + verbs: ["get"] + - apiGroups: [""] + resources: ["secrets", "services"] # to manage secrets which might contain sensitive data like credentials and services to expose applications within the cluster + verbs: ["get", "create", "update", "delete"] + - apiGroups: ["apps"] + resources: ["statefulsets", "deployments"] # to manage statefulsets, e.g. etcd, and deployments, e.g. karmada-operator + verbs: ["get", "create", "update", "delete"] + - nonResourceURLs: ["/healthz"] # used to check whether the karmada apiserver is healthy verbs: ["get"] diff --git a/charts/karmada/Chart.yaml b/charts/karmada/Chart.yaml index b9c4d8141057..8adc96f7f788 100644 --- a/charts/karmada/Chart.yaml +++ b/charts/karmada/Chart.yaml @@ -26,7 +26,7 @@ version: 0.0.0 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. Versions are not expected to # follow Semantic Versioning. They should reflect the version the application is using.
-appVersion: latest +appVersion: v1.1.0 # This is karmada dependencies dependencies: @@ -36,9 +36,7 @@ dependencies: # This is karmada maintainers maintainers: - - name: jrkeen - email: jrkeen@hotmail.com - - name: pidb - email: jackson.cloudnative@gmail.com - - name: Poor12 - email: shentiecheng@huawei.com + - email: chaosi@zju.edu.cn + name: chaosi-zju + - email: amiralavi7@gmail.com + name: a7i diff --git a/charts/karmada/README.md b/charts/karmada/README.md index c8944c02c3cb..8b609a42eecf 100644 --- a/charts/karmada/README.md +++ b/charts/karmada/README.md @@ -259,7 +259,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada | `agent.affinity` | Affinity of the agent | `{}` | | `agent.tolerations` | Tolerations of the agent | `[]` | | `agent.strategy` | Strategy of the agent | `{"type": "RollingUpdate", "rollingUpdate": {"maxUnavailable": "0", "maxSurge": "50%"} }` | -| `scheduler.labels` | Labels of the schedeler deployment | `{"app": "karmada-scheduler"}` | +| `scheduler.labels` | Labels of the scheduler deployment | `{"app": "karmada-scheduler"}` | | `scheduler.replicaCount` | Target replicas of the scheduler | `1` | | `scheduler.podLabels` | Labels of the scheduler pods | `{}` | | `scheduler.podAnnotations` | Annotations of the scheduler pods | `{}` | @@ -308,7 +308,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada | `apiServer.podAnnotations` | Annotations of the karmada-apiserver pods | `{}` | | `apiServer.image.pullSecrets` | Image pull secret of the karmada-apiserver | `[]` | | `apiServer.image.repository` | Image of the karmada-apiserver | `"registry.k8s.io/kube-apiserver"` | -| `apiServer.image.tag` | Image tag of the karmada-apiserver | `"v1.29.6"` | +| `apiServer.image.tag` | Image tag of the karmada-apiserver | `"v1.30.4"` | | `apiServer.image.pullPolicy` | Image pull policy of the karmada-apiserver | `"IfNotPresent"` | | `apiServer.resources` | Resource quota of the karmada-apiserver | `{}` | | `apiServer.hostNetwork` | Deploy karmada-apiserver with hostNetwork. 
If there are multiple karmadas in one cluster, you'd better set it to "false" | `"false"` | @@ -337,7 +337,7 @@ helm install karmada-scheduler-estimator -n karmada-system ./charts/karmada | `kubeControllerManager.podAnnotations` | Annotations of the kube-controller-manager pods | `{}` | | `kubeControllerManager.image.pullSecrets` | Image pull secret of the kube-controller-manager | `[]` | | `kubeControllerManager.image.repository` | Image of the kube-controller-manager | `"registry.k8s.io/kube-controller-manager"` | -| `kubeControllerManager.image.tag` | Image tag of the kube-controller-manager | `"v1.29.6"` | +| `kubeControllerManager.image.tag` | Image tag of the kube-controller-manager | `"v1.30.4"` | | `kubeControllerManager.image.pullPolicy` | Image pull policy of the kube-controller-manager | `"IfNotPresent"` | | `kubeControllerManager.resources` | Resource quota of the kube-controller-manager | `{}` | | `kubeControllerManager.nodeSelector` | Node selector of the kube-controller-manager | `{}` | diff --git a/charts/karmada/_crds/bases/apps/apps.karmada.io_workloadrebalancers.yaml b/charts/karmada/_crds/bases/apps/apps.karmada.io_workloadrebalancers.yaml index 17a0d54e0eae..04d002838794 100644 --- a/charts/karmada/_crds/bases/apps/apps.karmada.io_workloadrebalancers.yaml +++ b/charts/karmada/_crds/bases/apps/apps.karmada.io_workloadrebalancers.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: workloadrebalancers.apps.karmada.io spec: group: apps.karmada.io diff --git a/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_cronfederatedhpas.yaml b/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_cronfederatedhpas.yaml index ec81f31e0674..98dc633c0090 100644 --- a/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_cronfederatedhpas.yaml +++ b/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_cronfederatedhpas.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: cronfederatedhpas.autoscaling.karmada.io spec: group: autoscaling.karmada.io @@ -79,7 +79,6 @@ spec: Name of the rule. Each rule in a CronFederatedHPA must have a unique name. - Note: the name will be used as an identifier to record its execution history. 
Changing the name will be considered as deleting the old rule and adding a new rule, that means the original execution history will be diff --git a/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_federatedhpas.yaml b/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_federatedhpas.yaml index 7d8d74539559..615fd67e05b3 100644 --- a/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_federatedhpas.yaml +++ b/charts/karmada/_crds/bases/autoscaling/autoscaling.karmada.io_federatedhpas.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: federatedhpas.autoscaling.karmada.io spec: group: autoscaling.karmada.io diff --git a/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpretercustomizations.yaml b/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpretercustomizations.yaml index 394496651f12..e2325e0e8ede 100644 --- a/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpretercustomizations.yaml +++ b/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpretercustomizations.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: resourceinterpretercustomizations.config.karmada.io spec: group: config.karmada.io @@ -74,7 +74,6 @@ spec: a specific resource. The script should implement a function as follows: - ``` luaScript: > function GetDependencies(desiredObj) @@ -92,16 +91,13 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - desiredObj: the object represents the configuration to be applied to the member cluster. - The returned value should be expressed by a slice of DependentObjectReference. type: string required: @@ -118,7 +114,6 @@ spec: a specific resource. The script should implement a function as follows: - ``` luaScript: > function InterpretHealth(observedObj) @@ -128,16 +123,13 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - observedObj: the object represents the configuration that is observed from a specific member cluster. - The returned boolean value indicates the health status. type: string required: @@ -158,10 +150,8 @@ spec: LuaScript holds the Lua script that is used to discover the resource's replica as well as resource requirements - The script should implement a function as follows: - ``` luaScript: > function GetReplicas(desiredObj) @@ -175,16 +165,13 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - desiredObj: the object represents the configuration to be applied to the member cluster. - The function expects two return values: - replica: the declared replica number - requirement: the resource required by each replica expressed with a @@ -208,7 +195,6 @@ spec: LuaScript holds the Lua script that is used to revise replicas in the desired specification. 
The script should implement a function as follows: - ``` luaScript: > function ReviseReplica(desiredObj, desiredReplica) @@ -217,17 +203,14 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - desiredObj: the object represents the configuration to be applied to the member cluster. - desiredReplica: the replica number should be applied with. - The returned object should be a revised configuration which will be applied to member cluster eventually. type: string @@ -249,10 +232,8 @@ spec: LuaScript holds the Lua script that is used to retain runtime values to the desired specification. - The script should implement a function as follows: - ``` luaScript: > function Retain(desiredObj, observedObj) @@ -261,18 +242,15 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - desiredObj: the object represents the configuration to be applied to the member cluster. - observedObj: the object represents the configuration that is observed from a specific member cluster. - The returned object should be a retained configuration which will be applied to member cluster eventually. type: string @@ -293,7 +271,6 @@ spec: to the desired specification. The script should implement a function as follows: - ``` luaScript: > function AggregateStatus(desiredObj, statusItems) @@ -304,16 +281,13 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - desiredObj: the object represents a resource template. - statusItems: the slice of status expressed with AggregatedStatusItem. - The returned object should be a whole object with status aggregated. type: string required: @@ -331,7 +305,6 @@ spec: LuaScript holds the Lua script that is used to get the status from the observed specification. The script should implement a function as follows: - ``` luaScript: > function ReflectStatus(observedObj) @@ -341,16 +314,13 @@ spec: end ``` - The content of the LuaScript needs to be a whole function including both declaration and implementation. - The parameters will be supplied by the system: - observedObj: the object represents the configuration that is observed from a specific member cluster. - The returned status could be the whole status or part of it and will be set into both Work and ResourceBinding(ClusterResourceBinding). type: string diff --git a/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpreterwebhookconfigurations.yaml b/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpreterwebhookconfigurations.yaml index 36f486fa2cb3..1b9bd7aceced 100644 --- a/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpreterwebhookconfigurations.yaml +++ b/charts/karmada/_crds/bases/config/config.karmada.io_resourceinterpreterwebhookconfigurations.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: resourceinterpreterwebhookconfigurations.config.karmada.io spec: group: config.karmada.io @@ -61,7 +61,6 @@ spec: `service` is a reference to the service for this webhook. Either `service` or `url` must be specified. 
- If the webhook is running within the cluster, then you should use `service`. properties: name: @@ -96,29 +95,24 @@ spec: (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified. - The `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address. - Please note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster. - The scheme must be "https"; the URL must begin with "https://". - A path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier. - Attempting to use a user or basic auth e.g. "user:password@" is not allowed. Fragments ("#...") and query parameters ("?...") are not allowed, either. @@ -156,7 +150,6 @@ spec: ["apps", "batch", "example.io"] means matches 3 groups. ["*"] means matches all group - Note: The group could be empty, e.g the 'core' group of kubernetes, in that case use [""]. items: type: string diff --git a/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusteringresses.yaml b/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusteringresses.yaml index 13d5c93e403b..e2ccb3f56c7e 100644 --- a/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusteringresses.yaml +++ b/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusteringresses.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: multiclusteringresses.networking.karmada.io spec: group: networking.karmada.io @@ -142,19 +142,19 @@ spec: and\n\t :443 for https.\nBoth these may change in the future.\nIncoming requests are matched against the host before the\nIngressRuleValue. If the host is unspecified, the Ingress routes all\ntraffic - based on the specified IngressRuleValue.\n\n\nhost can be - \"precise\" which is a domain name without the terminating - dot of\na network host (e.g. \"foo.bar.com\") or \"wildcard\", - which is a domain name\nprefixed with a single wildcard label - (e.g. \"*.foo.com\").\nThe wildcard character '*' must appear - by itself as the first DNS label and\nmatches only a single - label. You cannot have a wildcard label by itself (e.g. Host - == \"*\").\nRequests will be matched against the Host field - in the following way:\n1. If host is precise, the request - matches this rule if the http host header is equal to Host.\n2. - If host is a wildcard, then the request matches this rule - if the http host header\nis to equal to the suffix (removing - the first label) of the wildcard rule." + based on the specified IngressRuleValue.\n\nhost can be \"precise\" + which is a domain name without the terminating dot of\na network + host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain + name\nprefixed with a single wildcard label (e.g. \"*.foo.com\").\nThe + wildcard character '*' must appear by itself as the first + DNS label and\nmatches only a single label. 
You cannot have + a wildcard label by itself (e.g. Host == \"*\").\nRequests + will be matched against the Host field in the following way:\n1. + If host is precise, the request matches this rule if the http + host header is equal to Host.\n2. If host is a wildcard, then + the request matches this rule if the http host header\nis + to equal to the suffix (removing the first label) of the wildcard + rule." type: string http: description: |- @@ -339,8 +339,6 @@ spec: CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. - --- - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -350,12 +348,12 @@ spec: format: int32 type: integer protocol: - default: TCP description: |- protocol is the protocol of the ingress port. The supported values are: "TCP", "UDP", "SCTP" type: string required: + - error - port - protocol type: object diff --git a/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusterservices.yaml b/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusterservices.yaml index c6201074bdc3..c723254885ea 100644 --- a/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusterservices.yaml +++ b/charts/karmada/_crds/bases/networking/networking.karmada.io_multiclusterservices.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: multiclusterservices.networking.karmada.io spec: group: networking.karmada.io @@ -59,6 +59,8 @@ spec: name: description: Name is the name of the cluster to be selected. type: string + required: + - name type: object type: array ports: @@ -94,6 +96,8 @@ spec: name: description: Name is the name of the cluster to be selected. type: string + required: + - name type: object type: array range: @@ -146,16 +150,8 @@ spec: conditions: description: Current service state items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -196,12 +192,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -263,8 +254,6 @@ spec: CamelCase names - cloud provider specific error values must have names that comply with the format foo.example.com/CamelCase. - --- - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -274,12 +263,12 @@ spec: format: int32 type: integer protocol: - default: TCP description: |- Protocol is the protocol of the service port of which status is recorded here The supported values are: "TCP", "UDP", "SCTP" type: string required: + - error - port - protocol type: object diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml index 360c08017a43..116260963c9b 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusteroverridepolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusteroverridepolicies.policy.karmada.io spec: group: policy.karmada.io @@ -80,6 +80,7 @@ spec: type: object required: - operator + - value type: object type: array argsOverrider: @@ -144,6 +145,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed + on the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single + field modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". 
+ enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed + on the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single + field modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -182,7 +269,6 @@ spec: description: |- Predicate filters images before applying the rule. - Defaults to nil, in that case, the system will automatically detect image fields if the resource type is Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet or Job by following rule: - Pod: /spec/containers//image @@ -193,7 +279,6 @@ spec: - Job: /spec/template/spec/containers//image In addition, all images will be processed if the resource object has more than one container. - If not nil, only images that match the filters will be processed. properties: path: @@ -240,6 +325,7 @@ spec: type: object required: - operator + - value type: object type: array plaintext: @@ -388,7 +474,6 @@ spec: description: |- Overriders represents the override rules that would apply on resources - Deprecated: This field is deprecated in v1.0; please use the OverrideRules instead. properties: annotationsOverrider: @@ -417,6 +502,7 @@ spec: type: object required: - operator + - value type: object type: array argsOverrider: @@ -481,6 +567,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
+ The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed on + the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single field + modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed on + the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single field + modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -519,7 +691,6 @@ spec: description: |- Predicate filters images before applying the rule. - Defaults to nil, in that case, the system will automatically detect image fields if the resource type is Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet or Job by following rule: - Pod: /spec/containers//image @@ -530,7 +701,6 @@ spec: - Job: /spec/template/spec/containers//image In addition, all images will be processed if the resource object has more than one container. - If not nil, only images matches the filters will be processed. properties: path: @@ -576,6 +746,7 @@ spec: type: object required: - operator + - value type: object type: array plaintext: @@ -692,7 +863,6 @@ spec: that only applies to resources propagated to the matching clusters. nil means matching all clusters. - Deprecated: This filed is deprecated in v1.0 and please use the OverrideRules instead. 
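For context, the FieldOverrider stanza defined above can be exercised as follows. This is a minimal sketch, assuming a hypothetical ClusterOverridePolicy named nginx-config-override, a member cluster named member1, and a ConfigMap whose db-config.yaml data key holds a YAML document:

apiVersion: policy.karmada.io/v1alpha1
kind: ClusterOverridePolicy
metadata:
  name: nginx-config-override         # hypothetical name
spec:
  resourceSelectors:
    - apiVersion: v1
      kind: ConfigMap
      name: nginx-config              # hypothetical ConfigMap
  overrideRules:
    - targetCluster:
        clusterNames:
          - member1                   # hypothetical member cluster
      overriders:
        fieldOverrider:
          - fieldPath: /data/db-config.yaml    # RFC 6901 path to the data key
            yaml:
              - subPath: /database/host        # RFC 6901 path inside the parsed YAML value
                operator: replace
                value: db.member1.example.com  # hypothetical override value

Per the schema, each FieldOverrider entry carries either json or yaml operations, never both at once.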
properties: clusterNames: diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml index c1ac7c57262e..e9d2e0600cef 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_clusterpropagationpolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusterpropagationpolicies.policy.karmada.io spec: group: policy.karmada.io @@ -62,12 +62,10 @@ spec: ActivationPreference indicates how the referencing resource template will be propagated, in case of policy changes. - If empty, the resource template will respond to policy changes immediately, in other words, any policy changes will drive the resource template to be propagated immediately as per the current propagation rules. - If the value is 'Lazy' means the policy changes will not take effect for now but defer to the resource template changes, in other words, the resource template will not be propagated as per the current propagation rules until @@ -93,7 +91,6 @@ spec: ConflictResolution declares how potential conflict should be handled when a resource that is being propagated already exists in the target cluster. - It defaults to "Abort" which means stop propagating to avoid unexpected overwrites. The "Overwrite" might be useful when migrating legacy cluster resources to Karmada, in which case conflict is predictable and can be @@ -107,12 +104,10 @@ spec: DependentOverrides represents the list of overrides(OverridePolicy) which must present before the current PropagationPolicy takes effect. - It used to explicitly specify overrides which current PropagationPolicy rely on. A typical scenario is the users create OverridePolicy(ies) and resources at the same time, they want to ensure the new-created policies would be adopted. - Note: For the overrides, OverridePolicy(ies) in current namespace and ClusterOverridePolicy(ies), which not present in this list will still be applied if they matches the resources. items: @@ -182,24 +177,20 @@ spec: ClusterAffinities represents scheduling restrictions to multiple cluster groups that indicated by ClusterAffinityTerm. - The scheduler will evaluate these groups one by one in the order they appear in the spec, the group that does not satisfy scheduling restrictions will be ignored which means all clusters in this group will not be selected unless it also belongs to the next group(a cluster could belong to multiple groups). - If none of the groups satisfy the scheduling restrictions, then scheduling fails, which means no cluster will be selected. - Note: 1. ClusterAffinities can not co-exist with ClusterAffinity. 2. If both ClusterAffinity and ClusterAffinities are not set, any cluster can be scheduling candidates. - Potential use case 1: The private clusters in the local data center could be the main group, and the managed clusters provided by cluster providers could be the secondary @@ -207,7 +198,6 @@ spec: to the main group and the second group will only be considered in case of the main group does not satisfy restrictions(like, lack of resources). 
- Potential use case 2: For the disaster recovery scenario, the clusters could be organized to primary and backup groups, the workloads would be scheduled to primary @@ -687,6 +677,24 @@ spec: - Always - Never type: string + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the resource template is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the resource template. + + This setting is particularly useful during workload migration scenarios to ensure + that rollback can occur quickly without affecting the workloads running on the + member clusters. + + Additionally, this setting applies uniformly across all member clusters and will not + selectively control preservation on only some clusters. + + Note: This setting does not apply to the deletion of the policy itself. + When the policy is deleted, the resource templates and their corresponding + propagated resources in member clusters will remain unchanged unless explicitly deleted. + type: boolean priority: default: 0 description: |- @@ -698,7 +706,6 @@ spec: not be preempted by following policies even with a higher priority. See Preemption for more details. - In case of two policies have the same priority, the one with a more precise matching rules in ResourceSelectors wins: - matching by name(resourceSelector.name) has higher priority than @@ -708,7 +715,6 @@ spec: If there is still no winner at this point, the one with the lower alphabetic order wins, e.g. policy 'bar' has higher priority than 'foo'. - The higher the value, the higher the priority. Defaults to zero. format: int32 type: integer @@ -720,7 +726,6 @@ spec: propagated along with the Deployment. In addition to the propagating process, the referencing resources will be migrated along with the Deployment in the fail-over scenario. - Defaults to false. 
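A usage sketch for the preserveResourcesOnDeletion switch added in this hunk, matching the migration/rollback scenario described in the field documentation; all names here are hypothetical:

apiVersion: policy.karmada.io/v1alpha1
kind: ClusterPropagationPolicy
metadata:
  name: nginx-cpp                     # hypothetical name
spec:
  preserveResourcesOnDeletion: true   # keep propagated resources when the template is deleted
  conflictResolution: Overwrite       # handy when migrating pre-existing workloads into Karmada
  resourceSelectors:
    - apiVersion: apps/v1
      kind: Deployment
      name: nginx                     # hypothetical workload
  placement:
    clusterAffinity:
      clusterNames:
        - member1                     # hypothetical member cluster

Note that, per the field documentation, the switch applies uniformly to all member clusters and has no effect on the deletion of the policy object itself.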
type: boolean resourceSelectors: diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_federatedresourcequotas.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_federatedresourcequotas.yaml index 5c07ad115c65..a31d435596cc 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_federatedresourcequotas.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_federatedresourcequotas.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: federatedresourcequotas.policy.karmada.io spec: group: policy.karmada.io diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml index eb12e693d040..70dc176a6e11 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_overridepolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: overridepolicies.policy.karmada.io spec: group: policy.karmada.io @@ -80,6 +80,7 @@ spec: type: object required: - operator + - value type: object type: array argsOverrider: @@ -144,6 +145,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. + properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed + on the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single + field modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. 
+ type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed + on the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single + field modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -182,7 +269,6 @@ spec: description: |- Predicate filters images before applying the rule. - Defaults to nil, in that case, the system will automatically detect image fields if the resource type is Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet or Job by following rule: - Pod: /spec/containers//image @@ -193,7 +279,6 @@ spec: - Job: /spec/template/spec/containers//image In addition, all images will be processed if the resource object has more than one container. - If not nil, only images matches the filters will be processed. properties: path: @@ -240,6 +325,7 @@ spec: type: object required: - operator + - value type: object type: array plaintext: @@ -388,7 +474,6 @@ spec: description: |- Overriders represents the override rules that would apply on resources - Deprecated: This filed is deprecated in v1.0 and please use the OverrideRules instead. properties: annotationsOverrider: @@ -417,6 +502,7 @@ spec: type: object required: - operator + - value type: object type: array argsOverrider: @@ -481,6 +567,92 @@ spec: - operator type: object type: array + fieldOverrider: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + items: + description: |- + FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + This allows changing a single field within the resource with multiple operations. + It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. 
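The json list is the counterpart for data keys whose values are JSON documents. A minimal sketch with a namespaced OverridePolicy follows; every name is hypothetical, and omitting targetCluster makes the rule apply to all clusters the resource is propagated to:

apiVersion: policy.karmada.io/v1alpha1
kind: OverridePolicy
metadata:
  name: app-config-override           # hypothetical name
  namespace: default
spec:
  resourceSelectors:
    - apiVersion: v1
      kind: ConfigMap
      name: app-config                # hypothetical ConfigMap
  overrideRules:
    - overriders:
        fieldOverrider:
          - fieldPath: /data/config.json       # data key holding a JSON document
            json:
              - subPath: /logLevel
                operator: add
                value: debug
              - subPath: /featureFlags/legacyMode
                operator: remove               # value is ignored for "remove"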
+ properties: + fieldPath: + description: |- + FieldPath specifies the initial location in the instance document where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + type: string + json: + description: JSON represents the operations performed on + the JSON document specified by the FieldPath. + items: + description: JSONPatchOperation represents a single field + modification operation for JSON format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + yaml: + description: YAML represents the operations performed on + the YAML document specified by the FieldPath. + items: + description: YAMLPatchOperation represents a single field + modification operation for YAML format. + properties: + operator: + description: |- + Operator indicates the operation on target field. + Available operators are: "add", "remove", and "replace". + enum: + - add + - remove + - replace + type: string + subPath: + description: |- + SubPath specifies the relative location within the initial FieldPath where the operation should take place. + The path uses RFC 6901 for navigating into nested structures. + type: string + value: + description: |- + Value is the new value to set for the specified field if the operation is "add" or "replace". + For "remove" operation, this field is ignored. + x-kubernetes-preserve-unknown-fields: true + required: + - operator + - subPath + type: object + type: array + required: + - fieldPath + type: object + type: array imageOverrider: description: ImageOverrider represents the rules dedicated to handling image overrides. @@ -519,7 +691,6 @@ spec: description: |- Predicate filters images before applying the rule. - Defaults to nil, in that case, the system will automatically detect image fields if the resource type is Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet or Job by following rule: - Pod: /spec/containers//image @@ -530,7 +701,6 @@ spec: - Job: /spec/template/spec/containers//image In addition, all images will be processed if the resource object has more than one container. - If not nil, only images matches the filters will be processed. properties: path: @@ -576,6 +746,7 @@ spec: type: object required: - operator + - value type: object type: array plaintext: @@ -692,7 +863,6 @@ spec: that only applies to resources propagated to the matching clusters. nil means matching all clusters. - Deprecated: This filed is deprecated in v1.0 and please use the OverrideRules instead. 
properties: clusterNames: diff --git a/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml b/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml index 1415e24325dd..70cdf3b93e12 100644 --- a/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml +++ b/charts/karmada/_crds/bases/policy/policy.karmada.io_propagationpolicies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: propagationpolicies.policy.karmada.io spec: group: policy.karmada.io @@ -59,12 +59,10 @@ spec: ActivationPreference indicates how the referencing resource template will be propagated, in case of policy changes. - If empty, the resource template will respond to policy changes immediately, in other words, any policy changes will drive the resource template to be propagated immediately as per the current propagation rules. - If the value is 'Lazy' means the policy changes will not take effect for now but defer to the resource template changes, in other words, the resource template will not be propagated as per the current propagation rules until @@ -90,7 +88,6 @@ spec: ConflictResolution declares how potential conflict should be handled when a resource that is being propagated already exists in the target cluster. - It defaults to "Abort" which means stop propagating to avoid unexpected overwrites. The "Overwrite" might be useful when migrating legacy cluster resources to Karmada, in which case conflict is predictable and can be @@ -104,12 +101,10 @@ spec: DependentOverrides represents the list of overrides(OverridePolicy) which must present before the current PropagationPolicy takes effect. - It used to explicitly specify overrides which current PropagationPolicy rely on. A typical scenario is the users create OverridePolicy(ies) and resources at the same time, they want to ensure the new-created policies would be adopted. - Note: For the overrides, OverridePolicy(ies) in current namespace and ClusterOverridePolicy(ies), which not present in this list will still be applied if they matches the resources. items: @@ -179,24 +174,20 @@ spec: ClusterAffinities represents scheduling restrictions to multiple cluster groups that indicated by ClusterAffinityTerm. - The scheduler will evaluate these groups one by one in the order they appear in the spec, the group that does not satisfy scheduling restrictions will be ignored which means all clusters in this group will not be selected unless it also belongs to the next group(a cluster could belong to multiple groups). - If none of the groups satisfy the scheduling restrictions, then scheduling fails, which means no cluster will be selected. - Note: 1. ClusterAffinities can not co-exist with ClusterAffinity. 2. If both ClusterAffinity and ClusterAffinities are not set, any cluster can be scheduling candidates. - Potential use case 1: The private clusters in the local data center could be the main group, and the managed clusters provided by cluster providers could be the secondary @@ -204,7 +195,6 @@ spec: to the main group and the second group will only be considered in case of the main group does not satisfy restrictions(like, lack of resources). 
- Potential use case 2: For the disaster recovery scenario, the clusters could be organized to primary and backup groups, the workloads would be scheduled to primary @@ -684,6 +674,24 @@ spec: - Always - Never type: string + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the resource template is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the resource template. + + This setting is particularly useful during workload migration scenarios to ensure + that rollback can occur quickly without affecting the workloads running on the + member clusters. + + Additionally, this setting applies uniformly across all member clusters and will not + selectively control preservation on only some clusters. + + Note: This setting does not apply to the deletion of the policy itself. + When the policy is deleted, the resource templates and their corresponding + propagated resources in member clusters will remain unchanged unless explicitly deleted. + type: boolean priority: default: 0 description: |- @@ -695,7 +703,6 @@ spec: not be preempted by following policies even with a higher priority. See Preemption for more details. - In case of two policies have the same priority, the one with a more precise matching rules in ResourceSelectors wins: - matching by name(resourceSelector.name) has higher priority than @@ -705,7 +712,6 @@ spec: If there is still no winner at this point, the one with the lower alphabetic order wins, e.g. policy 'bar' has higher priority than 'foo'. - The higher the value, the higher the priority. Defaults to zero. format: int32 type: integer @@ -717,7 +723,6 @@ spec: propagated along with the Deployment. In addition to the propagating process, the referencing resources will be migrated along with the Deployment in the fail-over scenario. - Defaults to false. type: boolean resourceSelectors: diff --git a/charts/karmada/_crds/bases/remedy/remedy.karmada.io_remedies.yaml b/charts/karmada/_crds/bases/remedy/remedy.karmada.io_remedies.yaml index 16443b7d2ae7..94f3ba17390b 100644 --- a/charts/karmada/_crds/bases/remedy/remedy.karmada.io_remedies.yaml +++ b/charts/karmada/_crds/bases/remedy/remedy.karmada.io_remedies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: remedies.remedy.karmada.io spec: group: remedy.karmada.io diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml index 7895411a95a2..d4aab0dea6c4 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_clusterresourcebindings.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: clusterresourcebindings.work.karmada.io spec: group: work.karmada.io @@ -145,16 +145,8 @@ spec: conditions: description: Conditions contain the different condition statuses. 
items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -195,12 +187,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -280,7 +267,6 @@ spec: ConflictResolution declares how potential conflict should be handled when a resource that is being propagated already exists in the target cluster. - It defaults to "Abort" which means stop propagating to avoid unexpected overwrites. The "Overwrite" might be useful when migrating legacy cluster resources to Karmada, in which case conflict is predictable and can be @@ -369,7 +355,6 @@ spec: Clients should not set this value to avoid the time inconsistency issue. It is represented in RFC3339 form(like '2021-04-25T10:02:10Z') and is in UTC. - Populated by the system. Read-only. format: date-time type: string @@ -434,24 +419,20 @@ spec: ClusterAffinities represents scheduling restrictions to multiple cluster groups that indicated by ClusterAffinityTerm. - The scheduler will evaluate these groups one by one in the order they appear in the spec, the group that does not satisfy scheduling restrictions will be ignored which means all clusters in this group will not be selected unless it also belongs to the next group(a cluster could belong to multiple groups). - If none of the groups satisfy the scheduling restrictions, then scheduling fails, which means no cluster will be selected. - Note: 1. ClusterAffinities can not co-exist with ClusterAffinity. 2. If both ClusterAffinity and ClusterAffinities are not set, any cluster can be scheduling candidates. - Potential use case 1: The private clusters in the local data center could be the main group, and the managed clusters provided by cluster providers could be the secondary @@ -459,7 +440,6 @@ spec: to the main group and the second group will only be considered in case of the main group does not satisfy restrictions(like, lack of resources). 
- Potential use case 2: For the disaster recovery scenario, the clusters could be organized to primary and backup groups, the workloads would be scheduled to primary @@ -930,6 +910,14 @@ spec: type: object type: array type: object + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the binding object is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the binding object. + This setting applies to all Work objects created under this binding object. + type: boolean propagateDeps: description: |- PropagateDeps tells if relevant resources should be propagated automatically. @@ -1150,7 +1138,6 @@ spec: It works with the status.lastScheduledTime field, and only when this timestamp is later than timestamp in status.lastScheduledTime will the rescheduling actually execute, otherwise, ignored. - It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. format: date-time type: string @@ -1264,16 +1251,8 @@ spec: conditions: description: Conditions contain the different condition statuses. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1314,12 +1293,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml index f69531f230bd..c0c20e1217bd 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_resourcebindings.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: resourcebindings.work.karmada.io spec: group: work.karmada.io @@ -145,16 +145,8 @@ spec: conditions: description: Conditions contain the different condition statuses. 
items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -195,12 +187,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -280,7 +267,6 @@ spec: ConflictResolution declares how potential conflict should be handled when a resource that is being propagated already exists in the target cluster. - It defaults to "Abort" which means stop propagating to avoid unexpected overwrites. The "Overwrite" might be useful when migrating legacy cluster resources to Karmada, in which case conflict is predictable and can be @@ -369,7 +355,6 @@ spec: Clients should not set this value to avoid the time inconsistency issue. It is represented in RFC3339 form(like '2021-04-25T10:02:10Z') and is in UTC. - Populated by the system. Read-only. format: date-time type: string @@ -434,24 +419,20 @@ spec: ClusterAffinities represents scheduling restrictions to multiple cluster groups that indicated by ClusterAffinityTerm. - The scheduler will evaluate these groups one by one in the order they appear in the spec, the group that does not satisfy scheduling restrictions will be ignored which means all clusters in this group will not be selected unless it also belongs to the next group(a cluster could belong to multiple groups). - If none of the groups satisfy the scheduling restrictions, then scheduling fails, which means no cluster will be selected. - Note: 1. ClusterAffinities can not co-exist with ClusterAffinity. 2. If both ClusterAffinity and ClusterAffinities are not set, any cluster can be scheduling candidates. - Potential use case 1: The private clusters in the local data center could be the main group, and the managed clusters provided by cluster providers could be the secondary @@ -459,7 +440,6 @@ spec: to the main group and the second group will only be considered in case of the main group does not satisfy restrictions(like, lack of resources). 
- Potential use case 2: For the disaster recovery scenario, the clusters could be organized to primary and backup groups, the workloads would be scheduled to primary @@ -930,6 +910,14 @@ spec: type: object type: array type: object + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member clusters when the binding object is deleted. + If set to true, resources will be preserved on the member clusters. + Default is false, which means resources will be deleted along with the binding object. + This setting applies to all Work objects created under this binding object. + type: boolean propagateDeps: description: |- PropagateDeps tells if relevant resources should be propagated automatically. @@ -1150,7 +1138,6 @@ spec: It works with the status.lastScheduledTime field, and only when this timestamp is later than timestamp in status.lastScheduledTime will the rescheduling actually execute, otherwise, ignored. - It is represented in RFC3339 form (like '2006-01-02T15:04:05Z') and is in UTC. format: date-time type: string @@ -1264,16 +1251,8 @@ spec: conditions: description: Conditions contain the different condition statuses. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1314,12 +1293,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml b/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml index 6f1fa665e438..2dd6d43a32b5 100644 --- a/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml +++ b/charts/karmada/_crds/bases/work/work.karmada.io_works.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: works.work.karmada.io spec: group: work.karmada.io @@ -54,11 +54,19 @@ spec: spec: description: Spec represents the desired behavior of Work. 
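The hunk just below adds the same preservation switch at the level of an individual Work, alongside the clarified suspendDispatching semantics. For reference, a minimal sketch of a Work carrying both fields; Works are normally generated by controllers, every name here is hypothetical, and the manifest payload is elided:

apiVersion: work.karmada.io/v1alpha1
kind: Work
metadata:
  name: nginx-work                    # hypothetical; usually controller-generated
  namespace: karmada-es-member1       # hypothetical execution namespace for cluster member1
spec:
  preserveResourcesOnDeletion: true   # keep resources on the member cluster when this Work is deleted
  suspendDispatching: false           # true stops propagation to this cluster only; status collection continues
  workload:
    manifests:
      - apiVersion: apps/v1
        kind: Deployment
        metadata:
          name: nginx
        # remaining Deployment fields elided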
properties: + preserveResourcesOnDeletion: + description: |- + PreserveResourcesOnDeletion controls whether resources should be preserved on the + member cluster when the Work object is deleted. + If set to true, resources will be preserved on the member cluster. + Default is false, which means resources will be deleted along with the Work object. + type: boolean suspendDispatching: description: |- SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. - Note: true means stop propagating to all clusters. + Note: true means stop propagating to the corresponding member cluster, and + does not prevent status collection. type: boolean workload: description: Workload represents the manifest workload to be deployed @@ -88,16 +96,8 @@ spec: 4. Degraded represents the current state of workload does not match the desired state for a certain period. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -138,12 +138,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/charts/karmada/templates/_helpers.tpl b/charts/karmada/templates/_helpers.tpl index 2d129b7cea09..546b2a149931 100644 --- a/charts/karmada/templates/_helpers.tpl +++ b/charts/karmada/templates/_helpers.tpl @@ -586,40 +586,31 @@ Return the proper Docker Image Registry Secret Names {{- end }} {{- end -}} -{{- define "karmada.init-sa-secret.volume" -}} -{{- $name := include "karmada.name" . 
-}} -- name: init-sa-secret - secret: - secretName: {{ $name }}-hook-job -{{- end -}} - -{{- define "karmada.init-sa-secret.volumeMount" -}} -- name: init-sa-secret - mountPath: /opt/mount -{{- end -}} - -{{- define "karmada.initContainer.build-kubeconfig" -}} -TOKEN=$(cat /opt/mount/token) -kubectl config set-cluster karmada-host --server=https://${KUBERNETES_SERVICE_HOST}:${KUBERNETES_SERVICE_PORT} --certificate-authority=/opt/mount/ca.crt -kubectl config set-credentials default --token=$TOKEN -kubectl config set-context karmada-host-context --cluster=karmada-host --user=default --namespace=default -kubectl config use-context karmada-host-context -{{- end -}} - {{- define "karmada.initContainer.waitEtcd" -}} - name: wait - image: {{ include "karmada.kubectl.image" . }} + image: {{ include "karmada.cfssl.image" . }} imagePullPolicy: {{ .Values.kubectl.image.pullPolicy }} command: - /bin/sh - -c - | bash <<'EOF' - {{- include "karmada.initContainer.build-kubeconfig" . | nindent 6 }} - kubectl rollout status statefulset etcd -n {{ include "karmada.namespace" . }} + set -ex + while true; do + ETCD_ENDPOINT=${ETCD_CLIENT_SERVICE_HOST}":"${ETCD_CLIENT_SERVICE_PORT} + + # check etcd connectivity by executing curl. + # if etcd is ready, the response of curl would be `curl: (52) Empty reply from server`, with return code 52. + # if not, the response of curl would be like `curl: (7) Failed to connect to .....`, with other return code. + if curl --connect-timeout 2 ${ETCD_ENDPOINT} || [ $? -eq 52 ]; then + break + fi + + echo "failed to connect to "${ETCD_ENDPOINT} + sleep 2 + done + echo "successfully connect to "${ETCD_ENDPOINT} EOF - volumeMounts: - {{- include "karmada.init-sa-secret.volumeMount" .| nindent 4 }} {{- end -}} {{- define "karmada.initContainer.waitStaticResource" -}} @@ -631,9 +622,18 @@ kubectl config use-context karmada-host-context - -c - | bash <<'EOF' - {{- include "karmada.initContainer.build-kubeconfig" . | nindent 6 }} - kubectl wait --for=condition=complete job {{ include "karmada.name" . }}-static-resource -n {{ include "karmada.namespace" . }} + set -ex + + # here are three cases: + # case first installation: no `cm/karmada-version` at first, so when you get it, it means `karmada-static-resource-job` finished. + # case restart: already has `cm/karmada-version`, which means `karmada-static-resource-job` already finished. + # case upgrading: already has `cm/karmada-version`, but it may be old version, we should wait until `.data.karmadaVersion` equal to current `.Values.karmadaImageVersion`. + while [[ $(kubectl --kubeconfig /etc/kubeconfig get configmap karmada-version -n {{ .Values.systemNamespace }} -o jsonpath='{.data.karmadaVersion}') != {{ .Values.karmadaImageVersion }} ]]; do + echo "wait for karmada-static-resource-job finished"; sleep 2 + done + + echo "karmada-static-resource-job successfully completed since expected configmap value was found" EOF volumeMounts: - {{- include "karmada.init-sa-secret.volumeMount" .| nindent 4 }} + {{- include "karmada.kubeconfig.volumeMount" .| nindent 4 }} {{- end -}} diff --git a/charts/karmada/templates/_karmada_apiservice.tpl b/charts/karmada/templates/_karmada_apiservice.tpl index 74b35ff8dd53..1166a2ffd17d 100644 --- a/charts/karmada/templates/_karmada_apiservice.tpl +++ b/charts/karmada/templates/_karmada_apiservice.tpl @@ -32,6 +32,8 @@ metadata: spec: type: ExternalName externalName: {{ $name }}-aggregated-apiserver.{{ include "karmada.namespace" . 
}}.svc.{{ .Values.clusterDomain }} +{{- end }} +{{- if has "metricsAdapter" .Values.components }} --- apiVersion: apiregistration.k8s.io/v1 kind: APIService diff --git a/charts/karmada/templates/_karmada_bootstrap_token_configuration.tpl b/charts/karmada/templates/_karmada_bootstrap_token_configuration.tpl index 4ec33dd6e445..93b27322fe9d 100644 --- a/charts/karmada/templates/_karmada_bootstrap_token_configuration.tpl +++ b/charts/karmada/templates/_karmada_bootstrap_token_configuration.tpl @@ -134,6 +134,7 @@ rules: - watch - patch - update + - delete - apiGroups: - cluster.karmada.io resources: diff --git a/charts/karmada/templates/karmada-aggregated-apiserver.yaml b/charts/karmada/templates/karmada-aggregated-apiserver.yaml index cc5e45fdaa35..d353cc8b33d1 100644 --- a/charts/karmada/templates/karmada-aggregated-apiserver.yaml +++ b/charts/karmada/templates/karmada-aggregated-apiserver.yaml @@ -98,7 +98,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.kubeconfig.volume" . | nindent 8 }} - name: apiserver-cert secret: diff --git a/charts/karmada/templates/karmada-apiserver.yaml b/charts/karmada/templates/karmada-apiserver.yaml index e4ec67d47166..69d2e285279a 100644 --- a/charts/karmada/templates/karmada-apiserver.yaml +++ b/charts/karmada/templates/karmada-apiserver.yaml @@ -55,9 +55,6 @@ spec: - --etcd-servers=https://etcd-client.{{ include "karmada.namespace" . }}.svc.{{ .Values.clusterDomain }}:2379 {{- end }} - --bind-address=0.0.0.0 - - --kubelet-client-certificate=/etc/kubernetes/pki/karmada.crt - - --kubelet-client-key=/etc/kubernetes/pki/karmada.key - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --runtime-config= - --secure-port=5443 - --service-account-issuer=https://kubernetes.default.svc.{{ .Values.clusterDomain }} @@ -137,7 +134,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} - name: apiserver-cert secret: secretName: {{ $name }}-cert diff --git a/charts/karmada/templates/karmada-controller-manager.yaml b/charts/karmada/templates/karmada-controller-manager.yaml index 75d524b243dd..41585c54a59a 100644 --- a/charts/karmada/templates/karmada-controller-manager.yaml +++ b/charts/karmada/templates/karmada-controller-manager.yaml @@ -42,7 +42,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.kubeconfig.volume" . | nindent 8 }} initContainers: {{- include "karmada.initContainer.waitStaticResource" . 
| nindent 8 }} @@ -53,10 +52,10 @@ spec: command: - /bin/karmada-controller-manager - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - --cluster-status-update-frequency=10s - - --secure-port=10357 - --leader-elect-resource-namespace={{ $systemNamespace }} + - --health-probe-bind-address=0.0.0.0:10357 + - --metrics-bind-address=:8080 - --v=2 {{- if .Values.controllerManager.controllers }} - --controllers={{ .Values.controllerManager.controllers }} diff --git a/charts/karmada/templates/karmada-descheduler.yaml b/charts/karmada/templates/karmada-descheduler.yaml index 3b2ae61c8fb5..21772976f5c3 100644 --- a/charts/karmada/templates/karmada-descheduler.yaml +++ b/charts/karmada/templates/karmada-descheduler.yaml @@ -50,7 +50,8 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ $systemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -66,7 +67,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: @@ -77,7 +78,6 @@ spec: resources: {{- toYaml .Values.descheduler.resources | nindent 12 }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.descheduler.kubeconfig.volume" . | nindent 8 }} {{- include "karmada.scheduler.cert.volume" . | nindent 8 }} diff --git a/charts/karmada/templates/karmada-metrics-adapter.yaml b/charts/karmada/templates/karmada-metrics-adapter.yaml index 16bc12bb5dd7..4c13ea825d38 100644 --- a/charts/karmada/templates/karmada-metrics-adapter.yaml +++ b/charts/karmada/templates/karmada-metrics-adapter.yaml @@ -83,7 +83,6 @@ spec: {{- toYaml . | nindent 8 }} {{- end }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.kubeconfig.volume" . | nindent 8 }} - name: apiserver-cert secret: diff --git a/charts/karmada/templates/karmada-scheduler-estimator.yaml b/charts/karmada/templates/karmada-scheduler-estimator.yaml index b42409c85706..7c1eabd7dd48 100644 --- a/charts/karmada/templates/karmada-scheduler-estimator.yaml +++ b/charts/karmada/templates/karmada-scheduler-estimator.yaml @@ -51,6 +51,8 @@ spec: - --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt - --grpc-auth-key-file=/etc/karmada/pki/karmada.key - --grpc-client-ca-file=/etc/karmada/pki/server-ca.crt + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 {{- with (include "karmada.schedulerEstimator.featureGates" (dict "featureGatesArg" $.Values.schedulerEstimator.featureGates)) }} - {{ . 
}} {{- end}} @@ -64,7 +66,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/charts/karmada/templates/karmada-scheduler.yaml b/charts/karmada/templates/karmada-scheduler.yaml index a9321535c0ef..a690c5319600 100644 --- a/charts/karmada/templates/karmada-scheduler.yaml +++ b/charts/karmada/templates/karmada-scheduler.yaml @@ -50,8 +50,8 @@ spec: command: - /bin/karmada-scheduler - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - - --secure-port=10351 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 - --leader-elect-resource-namespace={{ $systemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/server-ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -66,8 +66,9 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 - name: http + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: karmada-certs mountPath: /etc/karmada/pki @@ -76,7 +77,6 @@ spec: resources: {{- toYaml .Values.scheduler.resources | nindent 12 }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.kubeconfig.volume" . | nindent 8 }} {{- include "karmada.scheduler.cert.volume" . | nindent 8 }} diff --git a/charts/karmada/templates/karmada-search.yaml b/charts/karmada/templates/karmada-search.yaml index bb4c91fa9824..76c9f72611a4 100644 --- a/charts/karmada/templates/karmada-search.yaml +++ b/charts/karmada/templates/karmada-search.yaml @@ -92,7 +92,6 @@ spec: resources: {{- toYaml .Values.apiServer.resources | nindent 12 }} volumes: - {{- include "karmada.init-sa-secret.volume" . | nindent 8 }} {{- include "karmada.search.kubeconfig.volume" . | nindent 8 }} {{- include "karmada.search.etcd.cert.volume" . | nindent 8 }} --- diff --git a/charts/karmada/templates/karmada-static-resource-job.yaml b/charts/karmada/templates/karmada-static-resource-job.yaml index 1d2ea6179910..a1bf6cf605ab 100644 --- a/charts/karmada/templates/karmada-static-resource-job.yaml +++ b/charts/karmada/templates/karmada-static-resource-job.yaml @@ -11,6 +11,9 @@ metadata: spec: parallelism: 1 completions: 1 + {{- if semverCompare ">=1.23.0-0" .Capabilities.KubeVersion.GitVersion }} + ttlSecondsAfterFinished: {{ .Values.staticResourceJob.ttlSecondsAfterFinished }} + {{- end }} template: metadata: name: {{ $name }} @@ -42,6 +45,17 @@ spec: kubectl apply -k /crds --kubeconfig /etc/kubeconfig kubectl apply -f /static-resources/system-namespace.yaml --kubeconfig /etc/kubeconfig kubectl apply -f /static-resources/ --kubeconfig /etc/kubeconfig + + kubectl --kubeconfig /etc/kubeconfig apply -f - < 0 ]]; do echo "waiting for all pods of karmada control plane ready..."; sleep 1; done kubectl delete job {{ $name }}-static-resource -n {{ $namespace }} - kubectl delete secret {{ $name }}-hook-job -n {{ $namespace }} EOF {{- end }} +{{- end }} diff --git a/charts/karmada/templates/pre-install-job.yaml b/charts/karmada/templates/pre-install-job.yaml index cc8fe785f67a..4862252cdae6 100644 --- a/charts/karmada/templates/pre-install-job.yaml +++ b/charts/karmada/templates/pre-install-job.yaml @@ -459,21 +459,6 @@ metadata: {{- include "karmada.preInstallJob.labels" . 
| nindent 4 }} {{- end }} --- -apiVersion: v1 -kind: Secret -metadata: - name: {{ $name }}-hook-job - namespace: {{ $namespace }} - annotations: - "kubernetes.io/service-account.name": {{ $name }}-hook-job - "helm.sh/hook": pre-install - "helm.sh/hook-weight": "1" - {{- if "karmada.preInstallJob.labels" }} - labels: - {{- include "karmada.preInstallJob.labels" . | nindent 4 }} - {{- end }} -type: kubernetes.io/service-account-token ---- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: diff --git a/charts/karmada/values.yaml b/charts/karmada/values.yaml index 34fb8c1a7ce4..54874df67fd4 100644 --- a/charts/karmada/values.yaml +++ b/charts/karmada/values.yaml @@ -102,6 +102,9 @@ preInstallJob: staticResourceJob: tolerations: [] nodeSelector: {} + ## Set a TTL for the static-resource Job, the Job will be automatically cleaned up after this time. + ## This only works on Kubernetes version 1.23 or higher. + ttlSecondsAfterFinished: 10 ## post-install job config postInstallJob: @@ -371,7 +374,7 @@ apiServer: image: registry: registry.k8s.io repository: kube-apiserver - tag: "v1.29.6" + tag: "v1.30.4" ## Specify a imagePullPolicy, defaults to 'IfNotPresent' pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. @@ -569,7 +572,7 @@ kubeControllerManager: image: registry: registry.k8s.io repository: kube-controller-manager - tag: "v1.29.6" + tag: "v1.30.4" ## Specify a imagePullPolicy, defaults to 'IfNotPresent' pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. diff --git a/cluster/images/Dockerfile b/cluster/images/Dockerfile index d8fa12445ac9..b864d5215129 100644 --- a/cluster/images/Dockerfile +++ b/cluster/images/Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM alpine:3.20.2 +FROM alpine:3.20.3 ARG BINARY diff --git a/cluster/images/buildx.Dockerfile b/cluster/images/buildx.Dockerfile index f3fb70b054e9..937661a5be58 100644 --- a/cluster/images/buildx.Dockerfile +++ b/cluster/images/buildx.Dockerfile @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -FROM alpine:3.20.2 +FROM alpine:3.20.3 ARG BINARY ARG TARGETPLATFORM diff --git a/cmd/agent/app/agent.go b/cmd/agent/app/agent.go index ee89085e8fd8..06e3fd272550 100644 --- a/cmd/agent/app/agent.go +++ b/cmd/agent/app/agent.go @@ -78,10 +78,6 @@ func NewAgentCommand(ctx context.Context) *cobra.Command { plane and sync manifests from the Karmada control plane to the member cluster. In addition, it also syncs the status of member cluster and manifests to the Karmada control plane.`, RunE: func(_ *cobra.Command, _ []string) error { - // complete options - if err := opts.Complete(); err != nil { - return err - } // validate options if errs := opts.Validate(); len(errs) != 0 { return errs.ToAggregate() diff --git a/cmd/agent/app/options/options.go b/cmd/agent/app/options/options.go index 4ed8ca2b8444..d9bd4b643b07 100644 --- a/cmd/agent/app/options/options.go +++ b/cmd/agent/app/options/options.go @@ -18,8 +18,6 @@ package options import ( "fmt" - "net" - "strconv" "strings" "time" @@ -37,8 +35,6 @@ import ( const ( // DefaultKarmadaClusterNamespace defines the default namespace where the member cluster secrets are stored. 
DefaultKarmadaClusterNamespace = "karmada-cluster" - defaultBindAddress = "0.0.0.0" - defaultPort = 10357 ) var ( @@ -50,15 +46,8 @@ var ( // Options contains everything necessary to create and run controller-manager. type Options struct { // Controllers contains all controller names. - Controllers []string - LeaderElection componentbaseconfig.LeaderElectionConfiguration - // BindAddress is the IP address on which to listen for the --secure-port port. - // Deprecated: Use HealthProbeBindAddress instead. And will be removed in release 1.12+. - BindAddress string - // SecurePort is the port that the the server serves at. - // Note: We hope support https in the future once controller-runtime provides the functionality. - // Deprecated: Use HealthProbeBindAddress instead. And will be removed in release 1.12+. - SecurePort int + Controllers []string + LeaderElection componentbaseconfig.LeaderElectionConfiguration KarmadaKubeConfig string // ClusterContext is the name of the cluster context in control plane KUBECONFIG file. // Default value is the current-context. @@ -173,14 +162,6 @@ func (o *Options) AddFlags(fs *pflag.FlagSet, allControllers []string) { "A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. All controllers: %s.", strings.Join(allControllers, ", "), )) - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, - "The IP address on which to listen for the --secure-port port.") - fs.IntVar(&o.SecurePort, "secure-port", defaultPort, - "The secure port on which to serve HTTPS.") - // nolint: errcheck - fs.MarkDeprecated("bind-address", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address instead.") - // nolint: errcheck - fs.MarkDeprecated("secure-port", "This flag is deprecated and will be removed in release 1.12+. Use --health-probe-bind-address instead.") fs.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.") fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.") fs.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration", defaultElectionLeaseDuration.Duration, ""+ @@ -219,7 +200,7 @@ func (o *Options) AddFlags(fs *pflag.FlagSet, allControllers []string) { fs.IntVar(&o.ConcurrentWorkSyncs, "concurrent-work-syncs", 5, "The number of Works that are allowed to sync concurrently.") fs.StringSliceVar(&o.ReportSecrets, "report-secrets", []string{"KubeCredentials", "KubeImpersonator"}, "The secrets that are allowed to be reported to the Karmada control plane during registering. Valid values are 'KubeCredentials', 'KubeImpersonator' and 'None'. e.g 'KubeCredentials,KubeImpersonator' or 'None'.") fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the controller should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving.") - fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", "", "The TCP address that the controller should bind to for serving health probes(e.g. 127.0.0.1:10357, :10357). It can be set to \"0\" to disable serving the health probe. 
Defaults to 0.0.0.0:10357.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10357", "The TCP address that the controller should bind to for serving health probes(e.g. 127.0.0.1:10357, :10357). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10357.") fs.StringVar(&o.ClusterProvider, "cluster-provider", "", "Provider of the joining cluster. The Karmada scheduler can use this information to spread workloads across providers for higher availability.") fs.StringVar(&o.ClusterRegion, "cluster-region", "", "The region of the joining cluster. The Karmada scheduler can use this information to spread workloads across regions for higher availability.") fs.StringSliceVar(&o.ClusterZones, "cluster-zones", []string{}, "The zones of the joining cluster. The Karmada scheduler can use this information to spread workloads across zones for higher availability.") @@ -233,11 +214,3 @@ func (o *Options) AddFlags(fs *pflag.FlagSet, allControllers []string) { features.FeatureGate.AddFlag(fs) o.ProfileOpts.AddFlags(fs) } - -// Complete ensures that options are valid and marshals them if necessary. -func (o *Options) Complete() error { - if len(o.HealthProbeBindAddress) == 0 { - o.HealthProbeBindAddress = net.JoinHostPort(o.BindAddress, strconv.Itoa(o.SecurePort)) - } - return nil -} diff --git a/cmd/controller-manager/app/controllermanager.go b/cmd/controller-manager/app/controllermanager.go index fb7ceab1ece8..3429981480b4 100644 --- a/cmd/controller-manager/app/controllermanager.go +++ b/cmd/controller-manager/app/controllermanager.go @@ -19,8 +19,6 @@ package app import ( "context" "flag" - "net" - "strconv" "time" "github.com/spf13/cobra" @@ -156,7 +154,7 @@ func Run(ctx context.Context, opts *options.Options) error { RenewDeadline: &opts.LeaderElection.RenewDeadline.Duration, RetryPeriod: &opts.LeaderElection.RetryPeriod.Duration, LeaderElectionResourceLock: opts.LeaderElection.ResourceLock, - HealthProbeBindAddress: net.JoinHostPort(opts.BindAddress, strconv.Itoa(opts.SecurePort)), + HealthProbeBindAddress: opts.HealthProbeBindAddress, LivenessEndpointName: "/healthz", Metrics: metricsserver.Options{BindAddress: opts.MetricsBindAddress}, MapperProvider: restmapper.MapperProvider, diff --git a/cmd/controller-manager/app/options/options.go b/cmd/controller-manager/app/options/options.go index dbe53e9185b3..933a78aab34c 100644 --- a/cmd/controller-manager/app/options/options.go +++ b/cmd/controller-manager/app/options/options.go @@ -34,11 +34,6 @@ import ( "github.com/karmada-io/karmada/pkg/util" ) -const ( - defaultBindAddress = "0.0.0.0" - defaultPort = 10357 -) - var ( defaultElectionLeaseDuration = metav1.Duration{Duration: 15 * time.Second} defaultElectionRenewDeadline = metav1.Duration{Duration: 10 * time.Second} @@ -55,11 +50,6 @@ type Options struct { Controllers []string // LeaderElection defines the configuration of leader election client. LeaderElection componentbaseconfig.LeaderElectionConfiguration - // BindAddress is the IP address on which to listen for the --secure-port port. - BindAddress string - // SecurePort is the port that the the server serves at. - // Note: We hope support https in the future once controller-runtime provides the functionality. - SecurePort int // ClusterStatusUpdateFrequency is the frequency that controller computes and report cluster status. // It must work with ClusterMonitorGracePeriod(--cluster-monitor-grace-period) in karmada-controller-manager. 
ClusterStatusUpdateFrequency metav1.Duration @@ -112,6 +102,11 @@ type Options struct { // It can be set to "0" to disable the metrics serving. // Defaults to ":8080". MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" to disable serving the health probe. + // Defaults to ":10357". + HealthProbeBindAddress string // ConcurrentClusterSyncs is the number of cluster objects that are // allowed to sync concurrently. ConcurrentClusterSyncs int @@ -168,10 +163,6 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau "A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. \nAll controllers: %s.\nDisabled-by-default controllers: %s", strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", "), )) - flags.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, - "The IP address on which to listen for the --secure-port port.") - flags.IntVar(&o.SecurePort, "secure-port", defaultPort, - "The secure port on which to serve HTTPS.") flags.DurationVar(&o.ClusterStatusUpdateFrequency.Duration, "cluster-status-update-frequency", 10*time.Second, "Specifies how often karmada-controller-manager posts cluster status to karmada-apiserver.") flags.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election client and gain leadership before executing the main loop. Enable this when running replicated components for high availability.") @@ -219,6 +210,7 @@ func (o *Options) AddFlags(flags *pflag.FlagSet, allControllers, disabledByDefau flags.DurationVar(&o.ClusterCacheSyncTimeout.Duration, "cluster-cache-sync-timeout", util.CacheSyncTimeout, "Timeout period waiting for cluster cache to sync.") flags.DurationVar(&o.ResyncPeriod.Duration, "resync-period", 0, "Base frequency the informers are resynced.") flags.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the controller should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving.") + flags.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10357", "The TCP address that the controller should bind to for serving health probes(e.g. 127.0.0.1:10357, :10357). It can be set to \"0\" to disable serving the health probe. 
Defaults to 0.0.0.0:10357.") flags.IntVar(&o.ConcurrentClusterSyncs, "concurrent-cluster-syncs", 5, "The number of Clusters that are allowed to sync concurrently.") flags.IntVar(&o.ConcurrentClusterResourceBindingSyncs, "concurrent-clusterresourcebinding-syncs", 5, "The number of ClusterResourceBindings that are allowed to sync concurrently.") flags.IntVar(&o.ConcurrentResourceBindingSyncs, "concurrent-resourcebinding-syncs", 5, "The number of ResourceBindings that are allowed to sync concurrently.") diff --git a/cmd/controller-manager/app/options/validation.go b/cmd/controller-manager/app/options/validation.go index c60b2fc14cf8..01115493aa99 100644 --- a/cmd/controller-manager/app/options/validation.go +++ b/cmd/controller-manager/app/options/validation.go @@ -34,9 +34,6 @@ func (o *Options) Validate() field.ErrorList { if err := skippedResourceConfig.Parse(o.SkippedPropagatingAPIs); err != nil { errs = append(errs, field.Invalid(newPath.Child("SkippedPropagatingAPIs"), o.SkippedPropagatingAPIs, "Invalid API string")) } - if o.SecurePort < 0 || o.SecurePort > 65535 { - errs = append(errs, field.Invalid(newPath.Child("SecurePort"), o.SecurePort, "must be between 0 and 65535 inclusive")) - } if o.ClusterStatusUpdateFrequency.Duration <= 0 { errs = append(errs, field.Invalid(newPath.Child("ClusterStatusUpdateFrequency"), o.ClusterStatusUpdateFrequency, "must be greater than 0")) } diff --git a/cmd/controller-manager/app/options/validation_test.go b/cmd/controller-manager/app/options/validation_test.go index 2060cf687a15..f1d3df5133e2 100644 --- a/cmd/controller-manager/app/options/validation_test.go +++ b/cmd/controller-manager/app/options/validation_test.go @@ -31,7 +31,6 @@ type ModifyOptions func(option *Options) func New(modifyOptions ModifyOptions) Options { option := Options{ SkippedPropagatingAPIs: "cluster.karmada.io;policy.karmada.io;work.karmada.io", - SecurePort: 8090, ClusterStatusUpdateFrequency: metav1.Duration{Duration: 10 * time.Second}, ClusterLeaseDuration: metav1.Duration{Duration: 10 * time.Second}, ClusterMonitorPeriod: metav1.Duration{Duration: 10 * time.Second}, @@ -67,12 +66,6 @@ func TestValidateControllerManagerConfiguration(t *testing.T) { }), expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SkippedPropagatingAPIs"), "a/b/c/d?", "Invalid API string")}, }, - "invalid SecurePort": { - opt: New(func(options *Options) { - options.SecurePort = -10 - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SecurePort"), -10, "must be between 0 and 65535 inclusive")}, - }, "invalid ClusterStatusUpdateFrequency": { opt: New(func(options *Options) { options.ClusterStatusUpdateFrequency.Duration = -10 * time.Second diff --git a/cmd/descheduler/app/descheduler.go b/cmd/descheduler/app/descheduler.go index 17183732e16f..4c17a4033bc3 100644 --- a/cmd/descheduler/app/descheduler.go +++ b/cmd/descheduler/app/descheduler.go @@ -19,10 +19,8 @@ package app import ( "context" "fmt" - "net" "net/http" "os" - "strconv" "time" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -127,7 +125,7 @@ karmada-scheduler-estimator to get replica status.`, func run(opts *options.Options, stopChan <-chan struct{}) error { klog.Infof("karmada-descheduler version: %s", version.Get()) klog.Infof("Please make sure the karmada-scheduler-estimator of all member clusters has been deployed") - go serveHealthzAndMetrics(net.JoinHostPort(opts.BindAddress, strconv.Itoa(opts.SecurePort))) + serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress) 
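	// Editorial note: serveHealthzAndMetrics now starts its listeners on
	// background goroutines itself (see the refactor below), so the call is no
	// longer wrapped in `go`. Per that refactor, if both flags point at the
	// same address a single mux serves /healthz and /metrics together; with
	// the defaults from this patch they split, e.g. (illustrative):
	//
	//   curl http://127.0.0.1:10358/healthz   # descheduler health probe
	//   curl http://127.0.0.1:8080/metrics    # Prometheus metrics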
profileflag.ListenAndServe(opts.ProfileOpts) @@ -191,26 +189,64 @@ func run(opts *options.Options, stopChan <-chan struct{}) error { return nil } -func serveHealthzAndMetrics(address string) { +func serveHealthzAndMetrics(healthProbeBindAddress, metricsBindAddress string) { + if healthProbeBindAddress == metricsBindAddress { + if healthProbeBindAddress != "0" { + go serveCombined(healthProbeBindAddress) + } + } else { + if healthProbeBindAddress != "0" { + go serveHealthz(healthProbeBindAddress) + } + if metricsBindAddress != "0" { + go serveMetrics(metricsBindAddress) + } + } +} + +func serveCombined(address string) { mux := http.NewServeMux() - mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("ok")) - }) + mux.HandleFunc("/healthz", healthzHandler) + mux.Handle("/metrics", metricsHandler()) + + serveHTTP(address, mux, "healthz and metrics") +} + +func serveHealthz(address string) { + mux := http.NewServeMux() + mux.HandleFunc("/healthz", healthzHandler) + serveHTTP(address, mux, "healthz") +} - mux.Handle("/metrics", promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ +func serveMetrics(address string) { + mux := http.NewServeMux() + mux.Handle("/metrics", metricsHandler()) + serveHTTP(address, mux, "metrics") +} + +func healthzHandler(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) +} + +func metricsHandler() http.Handler { + return promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, - })) + }) +} - httpServer := http.Server{ +func serveHTTP(address string, handler http.Handler, name string) { + httpServer := &http.Server{ Addr: address, - Handler: mux, + Handler: handler, ReadHeaderTimeout: ReadHeaderTimeout, WriteTimeout: WriteTimeout, ReadTimeout: ReadTimeout, } + + klog.Infof("Starting %s server on %s", name, address) if err := httpServer.ListenAndServe(); err != nil { - klog.Errorf("Failed to serve healthz and metrics: %v", err) + klog.Errorf("Failed to serve %s on %s: %v", name, address, err) os.Exit(1) } } diff --git a/cmd/descheduler/app/descheduler_test.go b/cmd/descheduler/app/descheduler_test.go new file mode 100644 index 000000000000..5d787f7d781c --- /dev/null +++ b/cmd/descheduler/app/descheduler_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/karmada-io/karmada/cmd/descheduler/app/options" +) + +func TestNewDeschedulerCommand(t *testing.T) { + stopCh := make(chan struct{}) + cmd := NewDeschedulerCommand(stopCh) + + assert.NotNil(t, cmd) + assert.Equal(t, "karmada-descheduler", cmd.Use) + assert.NotEmpty(t, cmd.Long) +} + +func TestDeschedulerCommandFlagParsing(t *testing.T) { + testCases := []struct { + name string + args []string + expectError bool + }{ + {"Default flags", []string{}, false}, + {"With custom health probe bind address", []string{"--health-probe-bind-address=127.0.0.1:8080"}, false}, + {"With custom metrics bind address", []string{"--metrics-bind-address=127.0.0.1:8081"}, false}, + {"With leader election enabled", []string{"--leader-elect=true"}, false}, + {"With invalid flag", []string{"--invalid-flag=value"}, true}, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stopCh := make(chan struct{}) + cmd := NewDeschedulerCommand(stopCh) + cmd.SetArgs(tc.args) + err := cmd.ParseFlags(tc.args) + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestServeHealthzAndMetrics(t *testing.T) { + healthAddress := "127.0.0.1:8082" + metricsAddress := "127.0.0.1:8083" + + go serveHealthzAndMetrics(healthAddress, metricsAddress) + + // For servers to start + time.Sleep(100 * time.Millisecond) + + t.Run("Healthz endpoint", func(t *testing.T) { + resp, err := http.Get("http://" + healthAddress + "/healthz") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) + + t.Run("Metrics endpoint", func(t *testing.T) { + resp, err := http.Get("http://" + metricsAddress + "/metrics") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) +} + +func TestDeschedulerOptionsValidation(t *testing.T) { + testCases := []struct { + name string + setupOpts func(*options.Options) + expectError bool + }{ + { + name: "Default options", + setupOpts: func(_ *options.Options) { + // Default options are valid + }, + expectError: false, + }, + { + name: "Invalid descheduling interval", + setupOpts: func(o *options.Options) { + o.DeschedulingInterval.Duration = -1 * time.Second + }, + expectError: true, + }, + { + name: "Invalid unschedulable threshold", + setupOpts: func(o *options.Options) { + o.UnschedulableThreshold.Duration = -1 * time.Second + }, + expectError: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + opts := options.NewOptions() + tc.setupOpts(opts) + errs := opts.Validate() + if tc.expectError { + assert.NotEmpty(t, errs) + } else { + assert.Empty(t, errs) + } + }) + } +} diff --git a/cmd/descheduler/app/options/options.go b/cmd/descheduler/app/options/options.go index 67e8a54d4e34..73e9abdb8587 100644 --- a/cmd/descheduler/app/options/options.go +++ b/cmd/descheduler/app/options/options.go @@ -29,8 +29,6 @@ import ( ) const ( - defaultBindAddress = "0.0.0.0" - defaultPort = 10358 defaultEstimatorPort = 10352 defaultDeschedulingInterval = 2 * time.Minute defaultUnschedulableThreshold = 5 * time.Minute @@ -47,10 +45,6 @@ type Options struct { LeaderElection componentbaseconfig.LeaderElectionConfiguration KubeConfig string Master string - // BindAddress is the IP address on which to listen for the --secure-port port. - BindAddress string - // SecurePort is the port that the server serves at. 
- SecurePort int KubeAPIQPS float32 // KubeAPIBurst is the burst to allow while talking with karmada-apiserver. @@ -58,6 +52,8 @@ type Options struct { // SchedulerEstimatorTimeout specifies the timeout period of calling the accurate scheduler estimator service. SchedulerEstimatorTimeout metav1.Duration + // SchedulerEstimatorServiceNamespace specifies the namespace to be used for discovering scheduler estimator services. + SchedulerEstimatorServiceNamespace string // SchedulerEstimatorServicePrefix presents the prefix of the accurate scheduler estimator service name. SchedulerEstimatorServicePrefix string // SchedulerEstimatorPort is the port that the accurate scheduler estimator server serves at. @@ -75,6 +71,16 @@ type Options struct { // UnschedulableThreshold specifies the period of pod unschedulable condition. UnschedulableThreshold metav1.Duration ProfileOpts profileflag.Options + // MetricsBindAddress is the TCP address that the server should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // Defaults to ":8080". + MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the server should bind to + // for serving health probes + // It can be set to "0" to disable serving the health probe. + // Defaults to ":10358". + HealthProbeBindAddress string } // NewOptions builds a default descheduler options. @@ -99,10 +105,21 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { } fs.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Enable leader election, which must be true when running multi instances.") fs.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", util.NamespaceKarmadaSystem, "The namespace of resource object that is used for locking during leader election.") + fs.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration", defaultElectionLeaseDuration.Duration, ""+ + "The duration that non-leader candidates will wait after observing a leadership "+ + "renewal until attempting to acquire leadership of a led but unrenewed leader "+ + "slot. This is effectively the maximum duration that a leader can be stopped "+ + "before it is replaced by another candidate. This is only applicable if leader "+ + "election is enabled.") + fs.DurationVar(&o.LeaderElection.RenewDeadline.Duration, "leader-elect-renew-deadline", defaultElectionRenewDeadline.Duration, ""+ + "The interval between attempts by the acting master to renew a leadership slot "+ + "before it stops leading. This must be less than or equal to the lease duration. "+ + "This is only applicable if leader election is enabled.") + fs.DurationVar(&o.LeaderElection.RetryPeriod.Duration, "leader-elect-retry-period", defaultElectionRetryPeriod.Duration, ""+ + "The duration the clients should wait between attempting acquisition and renewal "+ + "of a leadership. This is only applicable if leader election is enabled.") fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to karmada control plane kubeconfig file.") fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server. Overrides any value in KubeConfig. 
Only required if out-of-cluster.") - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen for the --secure-port port.") - fs.IntVar(&o.SecurePort, "secure-port", defaultPort, "The secure port on which to serve HTTPS.") fs.Float32Var(&o.KubeAPIQPS, "kube-api-qps", 40.0, "QPS to use while talking with karmada-apiserver.") fs.IntVar(&o.KubeAPIBurst, "kube-api-burst", 60, "Burst to use while talking with karmada-apiserver.") fs.DurationVar(&o.SchedulerEstimatorTimeout.Duration, "scheduler-estimator-timeout", 3*time.Second, "Specifies the timeout period of calling the scheduler estimator service.") @@ -111,8 +128,11 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.SchedulerEstimatorKeyFile, "scheduler-estimator-key-file", "", "SSL key file used to secure scheduler estimator communication.") fs.StringVar(&o.SchedulerEstimatorCaFile, "scheduler-estimator-ca-file", "", "SSL Certificate Authority file used to secure scheduler estimator communication.") fs.BoolVar(&o.InsecureSkipEstimatorVerify, "insecure-skip-estimator-verify", false, "Controls whether verifies the scheduler estimator's certificate chain and host name.") + fs.StringVar(&o.SchedulerEstimatorServiceNamespace, "scheduler-estimator-service-namespace", util.NamespaceKarmadaSystem, "The namespace to be used for discovering scheduler estimator services.") fs.StringVar(&o.SchedulerEstimatorServicePrefix, "scheduler-estimator-service-prefix", "karmada-scheduler-estimator", "The prefix of scheduler estimator service name") fs.DurationVar(&o.DeschedulingInterval.Duration, "descheduling-interval", defaultDeschedulingInterval, "Time interval between two consecutive descheduler executions. Setting this value instructs the descheduler to run in a continuous loop at the interval specified.") fs.DurationVar(&o.UnschedulableThreshold.Duration, "unschedulable-threshold", defaultUnschedulableThreshold, "The period of pod unschedulable condition. This value is considered as a classification standard of unschedulable replicas.") + fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10358", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:10358, :10358). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10358.") o.ProfileOpts.AddFlags(fs) } diff --git a/cmd/descheduler/app/options/validation.go b/cmd/descheduler/app/options/validation.go index 81bf557e0d08..92e0c8dd5d3d 100644 --- a/cmd/descheduler/app/options/validation.go +++ b/cmd/descheduler/app/options/validation.go @@ -17,8 +17,6 @@ limitations under the License. 
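For operators, the net effect of dropping `--bind-address`/`--secure-port` from the descheduler is a flag migration; a sketch using the defaults visible in this patch (addresses are illustrative, not prescribed):

```sh
# before: one address/port pair served both health and metrics
karmada-descheduler --bind-address=0.0.0.0 --secure-port=10358

# after: health probes and metrics bind independently
karmada-descheduler --health-probe-bind-address=:10358 --metrics-bind-address=:8080
```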
package options import ( - "net" - "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -27,14 +25,6 @@ func (o *Options) Validate() field.ErrorList { errs := field.ErrorList{} newPath := field.NewPath("Options") - if net.ParseIP(o.BindAddress) == nil { - errs = append(errs, field.Invalid(newPath.Child("BindAddress"), o.BindAddress, "not a valid textual representation of an IP address")) - } - - if o.SecurePort < 0 || o.SecurePort > 65535 { - errs = append(errs, field.Invalid(newPath.Child("SecurePort"), o.SecurePort, "must be a valid port between 0 and 65535 inclusive")) - } - if o.SchedulerEstimatorPort < 0 || o.SchedulerEstimatorPort > 65535 { errs = append(errs, field.Invalid(newPath.Child("SchedulerEstimatorPort"), o.SchedulerEstimatorPort, "must be a valid port between 0 and 65535 inclusive")) } diff --git a/cmd/descheduler/app/options/validation_test.go b/cmd/descheduler/app/options/validation_test.go index 28c5ec126c8c..dd0545cf8f66 100644 --- a/cmd/descheduler/app/options/validation_test.go +++ b/cmd/descheduler/app/options/validation_test.go @@ -34,8 +34,6 @@ func New(modifyOptions ModifyOptions) Options { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, SchedulerEstimatorTimeout: metav1.Duration{Duration: 1 * time.Second}, @@ -61,8 +59,6 @@ func TestValidateKarmadaDescheduler(t *testing.T) { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, }} @@ -78,18 +74,6 @@ func TestValidateKarmadaDescheduler(t *testing.T) { opt Options expectedErrs field.ErrorList }{ - "invalid BindAddress": { - opt: New(func(option *Options) { - option.BindAddress = "127.0.0.1:8080" - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("BindAddress"), "127.0.0.1:8080", "not a valid textual representation of an IP address")}, - }, - "invalid SecurePort": { - opt: New(func(option *Options) { - option.SecurePort = 90000 - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SecurePort"), 90000, "must be a valid port between 0 and 65535 inclusive")}, - }, "invalid SchedulerEstimatorPort": { opt: New(func(option *Options) { option.SchedulerEstimatorPort = 90000 diff --git a/cmd/scheduler-estimator/app/options/options.go b/cmd/scheduler-estimator/app/options/options.go index 1d6ee5d26435..018a56e07255 100644 --- a/cmd/scheduler-estimator/app/options/options.go +++ b/cmd/scheduler-estimator/app/options/options.go @@ -24,9 +24,7 @@ import ( ) const ( - defaultBindAddress = "0.0.0.0" - defaultServerPort = 10352 - defaultHealthzPort = 10351 + defaultServerPort = 10352 ) // Options contains everything necessary to create and run scheduler-estimator. @@ -34,10 +32,6 @@ type Options struct { KubeConfig string Master string ClusterName string - // BindAddress is the IP address on which to listen for the --secure-port port. - BindAddress string - // SecurePort is the port that the server serves at. - SecurePort int // ServerPort is the port that the server gRPC serves at. ServerPort int // InsecureSkipGrpcClientVerify controls whether verifies the grpc client's certificate chain and host name. @@ -55,6 +49,16 @@ type Options struct { // Parallelism defines the amount of parallelism in algorithms for estimating. Must be greater than 0. Defaults to 16. 
Parallelism int ProfileOpts profileflag.Options + // MetricsBindAddress is the TCP address that the server should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // Defaults to ":8080". + MetricsBindAddress string + // HealthProbeBindAddress is the TCP address that the server should bind to + // for serving health probes + // It can be set to "0" to disable serving the health probe. + // Defaults to ":10351". + HealthProbeBindAddress string } // NewOptions builds an empty options. @@ -70,16 +74,16 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to member cluster's kubeconfig file.") fs.StringVar(&o.Master, "master", o.Master, "The address of the member Kubernetes API server. Overrides any value in KubeConfig. Only required if out-of-cluster.") fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName, "Name of member cluster that the estimator serves for.") - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen for the --secure-port port.") fs.IntVar(&o.ServerPort, "server-port", defaultServerPort, "The secure port on which to serve gRPC.") fs.StringVar(&o.GrpcAuthCertFile, "grpc-auth-cert-file", "", "SSL certification file used for grpc SSL/TLS connections.") fs.StringVar(&o.GrpcAuthKeyFile, "grpc-auth-key-file", "", "SSL key file used for grpc SSL/TLS connections.") fs.BoolVar(&o.InsecureSkipGrpcClientVerify, "insecure-skip-grpc-client-verify", false, "If set to true, the estimator will not verify the grpc client's certificate chain and host name. When the relevant certificates are not configured, it will not take effect.") - fs.StringVar(&o.GrpcClientCaFile, "grpc-client-ca-file", "", "SSL Certificate Authority file used to verify grpc client certificates on incoming requests if --client-cert-auth flag is set.") - fs.IntVar(&o.SecurePort, "secure-port", defaultHealthzPort, "The secure port on which to serve HTTPS.") + fs.StringVar(&o.GrpcClientCaFile, "grpc-client-ca-file", "", "SSL Certificate Authority file used to verify grpc client certificates on incoming requests.") fs.Float32Var(&o.ClusterAPIQPS, "kube-api-qps", 20.0, "QPS to use while talking with apiserver.") fs.IntVar(&o.ClusterAPIBurst, "kube-api-burst", 30, "Burst to use while talking with apiserver.") fs.IntVar(&o.Parallelism, "parallelism", o.Parallelism, "Parallelism defines the amount of parallelism in algorithms for estimating. Must be greater than 0. Defaults to 16.") + fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10351", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:10351, :10351). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10351.") features.FeatureGate.AddFlag(fs) o.ProfileOpts.AddFlags(fs) diff --git a/cmd/scheduler-estimator/app/options/validation.go b/cmd/scheduler-estimator/app/options/validation.go index 806a938b2438..69f5e66007b7 100644 --- a/cmd/scheduler-estimator/app/options/validation.go +++ b/cmd/scheduler-estimator/app/options/validation.go @@ -17,8 +17,6 @@ limitations under the License. 
package options import ( - "net" - "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -31,17 +29,9 @@ func (o *Options) Validate() field.ErrorList { errs = append(errs, field.Invalid(newPath.Child("ClusterName"), o.ClusterName, "clusterName cannot be empty")) } - if net.ParseIP(o.BindAddress) == nil { - errs = append(errs, field.Invalid(newPath.Child("BindAddress"), o.BindAddress, "not a valid textual representation of an IP address")) - } - if o.ServerPort < 0 || o.ServerPort > 65535 { errs = append(errs, field.Invalid(newPath.Child("ServerPort"), o.ServerPort, "must be a valid port between 0 and 65535 inclusive")) } - if o.SecurePort < 0 || o.SecurePort > 65535 { - errs = append(errs, field.Invalid(newPath.Child("SecurePort"), o.SecurePort, "must be a valid port between 0 and 65535 inclusive")) - } - return errs } diff --git a/cmd/scheduler-estimator/app/options/validation_test.go b/cmd/scheduler-estimator/app/options/validation_test.go index c9cf0845d16b..b4cc15747d0d 100644 --- a/cmd/scheduler-estimator/app/options/validation_test.go +++ b/cmd/scheduler-estimator/app/options/validation_test.go @@ -29,8 +29,6 @@ type ModifyOptions func(option *Options) func New(modifyOptions ModifyOptions) Options { option := Options{ ClusterName: "testCluster", - BindAddress: "0.0.0.0", - SecurePort: 10100, ServerPort: 8088, } @@ -61,18 +59,6 @@ func TestValidateKarmadaSchedulerEstimator(t *testing.T) { }), expectedErrs: field.ErrorList{field.Invalid(newPath.Child("ClusterName"), "", "clusterName cannot be empty")}, }, - "invalid BindAddress": { - opt: New(func(option *Options) { - option.BindAddress = "127.0.0.1:8082" - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("BindAddress"), "127.0.0.1:8082", "not a valid textual representation of an IP address")}, - }, - "invalid SecurePort": { - opt: New(func(option *Options) { - option.SecurePort = 908188 - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SecurePort"), 908188, "must be a valid port between 0 and 65535 inclusive")}, - }, "invalid ServerPort": { opt: New(func(option *Options) { option.ServerPort = 80888 diff --git a/cmd/scheduler-estimator/app/scheduler-estimator.go b/cmd/scheduler-estimator/app/scheduler-estimator.go index 4697a5130152..fed9ec40f4eb 100644 --- a/cmd/scheduler-estimator/app/scheduler-estimator.go +++ b/cmd/scheduler-estimator/app/scheduler-estimator.go @@ -19,10 +19,8 @@ package app import ( "context" "fmt" - "net" "net/http" "os" - "strconv" "time" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -114,7 +112,7 @@ provides the scheduler with more accurate cluster resource information.`, func run(ctx context.Context, opts *options.Options) error { klog.Infof("karmada-scheduler-estimator version: %s", version.Get()) - go serveHealthzAndMetrics(net.JoinHostPort(opts.BindAddress, strconv.Itoa(opts.SecurePort))) + serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress) profileflag.ListenAndServe(opts.ProfileOpts) @@ -143,26 +141,64 @@ func run(ctx context.Context, opts *options.Options) error { return nil } -func serveHealthzAndMetrics(address string) { +func serveHealthzAndMetrics(healthProbeBindAddress, metricsBindAddress string) { + if healthProbeBindAddress == metricsBindAddress { + if healthProbeBindAddress != "0" { + go serveCombined(healthProbeBindAddress) + } + } else { + if healthProbeBindAddress != "0" { + go serveHealthz(healthProbeBindAddress) + } + if metricsBindAddress != "0" { + go serveMetrics(metricsBindAddress) + } + } +} + +func 
serveCombined(address string) { mux := http.NewServeMux() - mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("ok")) - }) + mux.HandleFunc("/healthz", healthzHandler) + mux.Handle("/metrics", metricsHandler()) + + serveHTTP(address, mux, "healthz and metrics") +} + +func serveHealthz(address string) { + mux := http.NewServeMux() + mux.HandleFunc("/healthz", healthzHandler) + serveHTTP(address, mux, "healthz") +} - mux.Handle("/metrics", promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ +func serveMetrics(address string) { + mux := http.NewServeMux() + mux.Handle("/metrics", metricsHandler()) + serveHTTP(address, mux, "metrics") +} + +func healthzHandler(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) +} + +func metricsHandler() http.Handler { + return promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, - })) + }) +} - httpServer := http.Server{ +func serveHTTP(address string, handler http.Handler, name string) { + httpServer := &http.Server{ Addr: address, - Handler: mux, + Handler: handler, ReadHeaderTimeout: ReadHeaderTimeout, WriteTimeout: WriteTimeout, ReadTimeout: ReadTimeout, } + + klog.Infof("Starting %s server on %s", name, address) if err := httpServer.ListenAndServe(); err != nil { - klog.Errorf("Failed to serve healthz and metrics: %v", err) + klog.Errorf("Failed to serve %s on %s: %v", name, address, err) os.Exit(1) } } diff --git a/cmd/scheduler/app/options/options.go b/cmd/scheduler/app/options/options.go index ac994775253c..36f67044763e 100644 --- a/cmd/scheduler/app/options/options.go +++ b/cmd/scheduler/app/options/options.go @@ -35,8 +35,6 @@ import ( ) const ( - defaultBindAddress = "0.0.0.0" - defaultPort = 10351 defaultEstimatorPort = 10352 ) @@ -51,10 +49,18 @@ type Options struct { LeaderElection componentbaseconfig.LeaderElectionConfiguration KubeConfig string Master string - // BindAddress is the IP address on which to listen for the --secure-port port. - BindAddress string - // SecurePort is the port that the server serves at. - SecurePort int + + // MetricsBindAddress is the TCP address that the controller should bind to + // for serving prometheus metrics. + // It can be set to "0" to disable the metrics serving. + // Defaults to ":8080". + MetricsBindAddress string + + // HealthProbeBindAddress is the TCP address that the controller should bind to + // for serving health probes + // It can be set to "0" or "" to disable serving the health probe. + // Defaults to ":10351". + HealthProbeBindAddress string // KubeAPIQPS is the QPS to use while talking with karmada-apiserver. KubeAPIQPS float32 @@ -67,6 +73,8 @@ type Options struct { DisableSchedulerEstimatorInPullMode bool // SchedulerEstimatorTimeout specifies the timeout period of calling the accurate scheduler estimator service. SchedulerEstimatorTimeout metav1.Duration + // SchedulerEstimatorServiceNamespace specifies the namespace to be used for discovering scheduler estimator services. + SchedulerEstimatorServiceNamespace string // SchedulerEstimatorServicePrefix presents the prefix of the accurate scheduler estimator service name. SchedulerEstimatorServicePrefix string // SchedulerEstimatorPort is the port that the accurate scheduler estimator server serves at. @@ -137,13 +145,14 @@ func (o *Options) AddFlags(fs *pflag.FlagSet) { "of a leadership. 
This is only applicable if leader election is enabled.") fs.StringVar(&o.KubeConfig, "kubeconfig", o.KubeConfig, "Path to karmada control plane kubeconfig file.") fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server. Overrides any value in KubeConfig. Only required if out-of-cluster.") - fs.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen for the --secure-port port.") - fs.IntVar(&o.SecurePort, "secure-port", defaultPort, "The secure port on which to serve HTTPS.") + fs.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080", "The TCP address that the server should bind to for serving prometheus metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving. Defaults to 0.0.0.0:8080.") + fs.StringVar(&o.HealthProbeBindAddress, "health-probe-bind-address", ":10351", "The TCP address that the server should bind to for serving health probes(e.g. 127.0.0.1:10351, :10351). It can be set to \"0\" to disable serving the health probe. Defaults to 0.0.0.0:10351.") fs.Float32Var(&o.KubeAPIQPS, "kube-api-qps", 40.0, "QPS to use while talking with karmada-apiserver.") fs.IntVar(&o.KubeAPIBurst, "kube-api-burst", 60, "Burst to use while talking with karmada-apiserver.") fs.BoolVar(&o.EnableSchedulerEstimator, "enable-scheduler-estimator", false, "Enable calling cluster scheduler estimator for adjusting replicas.") fs.BoolVar(&o.DisableSchedulerEstimatorInPullMode, "disable-scheduler-estimator-in-pull-mode", false, "Disable the scheduler estimator for clusters in pull mode, which takes effect only when enable-scheduler-estimator is true.") fs.DurationVar(&o.SchedulerEstimatorTimeout.Duration, "scheduler-estimator-timeout", 3*time.Second, "Specifies the timeout period of calling the scheduler estimator service.") + fs.StringVar(&o.SchedulerEstimatorServiceNamespace, "scheduler-estimator-service-namespace", util.NamespaceKarmadaSystem, "The namespace to be used for discovering scheduler estimator services.") fs.StringVar(&o.SchedulerEstimatorServicePrefix, "scheduler-estimator-service-prefix", "karmada-scheduler-estimator", "The prefix of scheduler estimator service name") fs.IntVar(&o.SchedulerEstimatorPort, "scheduler-estimator-port", defaultEstimatorPort, "The secure port on which to connect the accurate scheduler estimator.") fs.StringVar(&o.SchedulerEstimatorCertFile, "scheduler-estimator-cert-file", "", "SSL certification file used to secure scheduler estimator communication.") diff --git a/cmd/scheduler/app/options/options_test.go b/cmd/scheduler/app/options/options_test.go new file mode 100644 index 000000000000..96487533f9ae --- /dev/null +++ b/cmd/scheduler/app/options/options_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package options + +import ( + "testing" + "time" + + "github.com/spf13/pflag" + "github.com/stretchr/testify/assert" +) + +func TestNewOptions(t *testing.T) { + opts := NewOptions() + + assert.True(t, opts.LeaderElection.LeaderElect, "Expected default LeaderElect to be true") + assert.Equal(t, "karmada-system", opts.LeaderElection.ResourceNamespace, "Unexpected default ResourceNamespace") + assert.Equal(t, 15*time.Second, opts.LeaderElection.LeaseDuration.Duration, "Unexpected default LeaseDuration") + assert.Equal(t, "karmada-scheduler", opts.LeaderElection.ResourceName, "Unexpected default ResourceName") +} + +func TestAddFlags(t *testing.T) { + opts := NewOptions() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + opts.AddFlags(fs) + + testCases := []struct { + name string + expectedType string + expectedDefault string + }{ + {"kubeconfig", "string", ""}, + {"leader-elect", "bool", "true"}, + {"enable-scheduler-estimator", "bool", "false"}, + {"scheduler-estimator-port", "int", "10352"}, + {"plugins", "stringSlice", "[*]"}, + {"scheduler-name", "string", "default-scheduler"}, + } + + for _, tc := range testCases { + flag := fs.Lookup(tc.name) + assert.NotNil(t, flag, "Flag %s not found", tc.name) + assert.Equal(t, tc.expectedType, flag.Value.Type(), "Unexpected type for flag %s", tc.name) + assert.Equal(t, tc.expectedDefault, flag.DefValue, "Unexpected default value for flag %s", tc.name) + } +} + +func TestOptionsFlagParsing(t *testing.T) { + opts := NewOptions() + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + opts.AddFlags(fs) + + testArgs := []string{ + "--leader-elect=false", + "--enable-scheduler-estimator=true", + "--plugins=*,-foo,bar", + "--scheduler-name=custom-scheduler", + } + + err := fs.Parse(testArgs) + assert.NoError(t, err) + + assert.False(t, opts.LeaderElection.LeaderElect) + assert.True(t, opts.EnableSchedulerEstimator) + assert.Equal(t, []string{"*", "-foo", "bar"}, opts.Plugins) + assert.Equal(t, "custom-scheduler", opts.SchedulerName) +} diff --git a/cmd/scheduler/app/options/validation.go b/cmd/scheduler/app/options/validation.go index 7612ff2e83ce..8dc83188ec7a 100644 --- a/cmd/scheduler/app/options/validation.go +++ b/cmd/scheduler/app/options/validation.go @@ -17,8 +17,6 @@ limitations under the License. 
package options import ( - "net" - "k8s.io/apimachinery/pkg/util/validation/field" ) @@ -27,18 +25,9 @@ func (o *Options) Validate() field.ErrorList { errs := field.ErrorList{} newPath := field.NewPath("Options") - if net.ParseIP(o.BindAddress) == nil { - errs = append(errs, field.Invalid(newPath.Child("BindAddress"), o.BindAddress, "not a valid textual representation of an IP address")) - } - - if o.SecurePort < 0 || o.SecurePort > 65535 { - errs = append(errs, field.Invalid(newPath.Child("SecurePort"), o.SecurePort, "must be a valid port between 0 and 65535 inclusive")) - } - if o.SchedulerEstimatorPort < 0 || o.SchedulerEstimatorPort > 65535 { errs = append(errs, field.Invalid(newPath.Child("SchedulerEstimatorPort"), o.SchedulerEstimatorPort, "must be a valid port between 0 and 65535 inclusive")) } - if o.SchedulerEstimatorTimeout.Duration < 0 { errs = append(errs, field.Invalid(newPath.Child("SchedulerEstimatorTimeout"), o.SchedulerEstimatorTimeout, "must be greater than or equal to 0")) } diff --git a/cmd/scheduler/app/options/validation_test.go b/cmd/scheduler/app/options/validation_test.go index 42940a6e6a03..9cc5e1c63143 100644 --- a/cmd/scheduler/app/options/validation_test.go +++ b/cmd/scheduler/app/options/validation_test.go @@ -34,8 +34,6 @@ func New(modifyOptions ModifyOptions) Options { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, EnableSchedulerEstimator: false, @@ -62,8 +60,6 @@ func TestValidateKarmadaSchedulerConfiguration(t *testing.T) { LeaderElection: componentbaseconfig.LeaderElectionConfiguration{ LeaderElect: false, }, - BindAddress: "127.0.0.1", - SecurePort: 9000, KubeAPIQPS: 40, KubeAPIBurst: 30, SchedulerName: "default-scheduler", @@ -81,18 +77,6 @@ func TestValidateKarmadaSchedulerConfiguration(t *testing.T) { opt Options expectedErrs field.ErrorList }{ - "invalid BindAddress": { - opt: New(func(option *Options) { - option.BindAddress = "127.0.0.1:8080" - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("BindAddress"), "127.0.0.1:8080", "not a valid textual representation of an IP address")}, - }, - "invalid SecurePort": { - opt: New(func(option *Options) { - option.SecurePort = 90000 - }), - expectedErrs: field.ErrorList{field.Invalid(newPath.Child("SecurePort"), 90000, "must be a valid port between 0 and 65535 inclusive")}, - }, "invalid SchedulerEstimatorPort": { opt: New(func(option *Options) { option.SchedulerEstimatorPort = 90000 diff --git a/cmd/scheduler/app/scheduler.go b/cmd/scheduler/app/scheduler.go index 9d4bad259136..475ee9482d5d 100644 --- a/cmd/scheduler/app/scheduler.go +++ b/cmd/scheduler/app/scheduler.go @@ -19,10 +19,8 @@ package app import ( "context" "fmt" - "net" "net/http" "os" - "strconv" "time" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -139,7 +137,7 @@ the most suitable cluster.`, func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Option) error { klog.Infof("karmada-scheduler version: %s", version.Get()) - go serveHealthzAndMetrics(net.JoinHostPort(opts.BindAddress, strconv.Itoa(opts.SecurePort))) + serveHealthzAndMetrics(opts.HealthProbeBindAddress, opts.MetricsBindAddress) profileflag.ListenAndServe(opts.ProfileOpts) @@ -169,6 +167,7 @@ func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Opt scheduler.WithOutOfTreeRegistry(outOfTreeRegistry), scheduler.WithEnableSchedulerEstimator(opts.EnableSchedulerEstimator), 
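		// Editorial note: WithSchedulerEstimatorServiceNamespace (added just
		// below) threads the new --scheduler-estimator-service-namespace flag
		// into the scheduler, so per-cluster estimator services can be
		// discovered in a namespace other than the karmada-system default.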
scheduler.WithDisableSchedulerEstimatorInPullMode(opts.DisableSchedulerEstimatorInPullMode), + scheduler.WithSchedulerEstimatorServiceNamespace(opts.SchedulerEstimatorServiceNamespace), scheduler.WithSchedulerEstimatorServicePrefix(opts.SchedulerEstimatorServicePrefix), scheduler.WithSchedulerEstimatorConnection(opts.SchedulerEstimatorPort, opts.SchedulerEstimatorCertFile, opts.SchedulerEstimatorKeyFile, opts.SchedulerEstimatorCaFile, opts.InsecureSkipEstimatorVerify), scheduler.WithSchedulerEstimatorTimeout(opts.SchedulerEstimatorTimeout), @@ -225,26 +224,64 @@ func run(opts *options.Options, stopChan <-chan struct{}, registryOptions ...Opt return nil } -func serveHealthzAndMetrics(address string) { +func serveHealthzAndMetrics(healthProbeBindAddress, metricsBindAddress string) { + if healthProbeBindAddress == metricsBindAddress { + if healthProbeBindAddress != "0" { + go serveCombined(healthProbeBindAddress) + } + } else { + if healthProbeBindAddress != "0" { + go serveHealthz(healthProbeBindAddress) + } + if metricsBindAddress != "0" { + go serveMetrics(metricsBindAddress) + } + } +} + +func serveCombined(address string) { mux := http.NewServeMux() - mux.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) { - w.WriteHeader(http.StatusOK) - _, _ = w.Write([]byte("ok")) - }) + mux.HandleFunc("/healthz", healthzHandler) + mux.Handle("/metrics", metricsHandler()) + + serveHTTP(address, mux, "healthz and metrics") +} + +func serveHealthz(address string) { + mux := http.NewServeMux() + mux.HandleFunc("/healthz", healthzHandler) + serveHTTP(address, mux, "healthz") +} + +func serveMetrics(address string) { + mux := http.NewServeMux() + mux.Handle("/metrics", metricsHandler()) + serveHTTP(address, mux, "metrics") +} - mux.Handle("/metrics", promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ +func healthzHandler(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte("ok")) +} + +func metricsHandler() http.Handler { + return promhttp.HandlerFor(ctrlmetrics.Registry, promhttp.HandlerOpts{ ErrorHandling: promhttp.HTTPErrorOnError, - })) + }) +} - httpServer := http.Server{ +func serveHTTP(address string, handler http.Handler, name string) { + httpServer := &http.Server{ Addr: address, - Handler: mux, + Handler: handler, ReadHeaderTimeout: ReadHeaderTimeout, WriteTimeout: WriteTimeout, ReadTimeout: ReadTimeout, } + + klog.Infof("Starting %s server on %s", name, address) if err := httpServer.ListenAndServe(); err != nil { - klog.Errorf("Failed to serve healthz and metrics: %v", err) + klog.Errorf("Failed to serve %s on %s: %v", name, address, err) os.Exit(1) } } diff --git a/cmd/scheduler/app/scheduler_test.go b/cmd/scheduler/app/scheduler_test.go new file mode 100644 index 000000000000..60fce37b42bd --- /dev/null +++ b/cmd/scheduler/app/scheduler_test.go @@ -0,0 +1,136 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package app + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/karmada-io/karmada/cmd/scheduler/app/options" +) + +func TestNewSchedulerCommand(t *testing.T) { + stopCh := make(chan struct{}) + cmd := NewSchedulerCommand(stopCh) + assert.NotNil(t, cmd) + assert.Equal(t, "karmada-scheduler", cmd.Use) + assert.NotEmpty(t, cmd.Long) +} + +func TestSchedulerCommandFlagParsing(t *testing.T) { + testCases := []struct { + name string + args []string + expectError bool + }{ + {"Default flags", []string{}, false}, + {"With custom health probe bind address", []string{"--health-probe-bind-address=127.0.0.1:8080"}, false}, + {"With custom metrics bind address", []string{"--metrics-bind-address=127.0.0.1:8081"}, false}, + {"With leader election enabled", []string{"--leader-elect=true"}, false}, + {"With invalid flag", []string{"--invalid-flag=value"}, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + stopCh := make(chan struct{}) + cmd := NewSchedulerCommand(stopCh) + cmd.SetArgs(tc.args) + err := cmd.ParseFlags(tc.args) + if tc.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestServeHealthzAndMetrics(t *testing.T) { + healthAddress := "127.0.0.1:8082" + metricsAddress := "127.0.0.1:8083" + + go serveHealthzAndMetrics(healthAddress, metricsAddress) + + // For servers to start + time.Sleep(100 * time.Millisecond) + + t.Run("Healthz endpoint", func(t *testing.T) { + resp, err := http.Get("http://" + healthAddress + "/healthz") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) + + t.Run("Metrics endpoint", func(t *testing.T) { + resp, err := http.Get("http://" + metricsAddress + "/metrics") + require.NoError(t, err) + assert.Equal(t, http.StatusOK, resp.StatusCode) + }) +} + +func TestSchedulerOptionsValidation(t *testing.T) { + testCases := []struct { + name string + setupOpts func(*options.Options) + expectError bool + }{ + { + name: "Default options", + setupOpts: func(o *options.Options) { + o.SchedulerName = "default-scheduler" + }, + expectError: false, + }, + { + name: "Empty scheduler name", + setupOpts: func(o *options.Options) { + o.SchedulerName = "" + }, + expectError: true, + }, + { + name: "Invalid kube API QPS", + setupOpts: func(o *options.Options) { + o.KubeAPIQPS = -1 + }, + expectError: true, + }, + { + name: "Invalid kube API burst", + setupOpts: func(o *options.Options) { + o.KubeAPIBurst = -1 + }, + expectError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + opts := options.NewOptions() + tc.setupOpts(opts) + errs := opts.Validate() + if tc.expectError { + assert.NotEmpty(t, errs) + } else { + assert.Empty(t, errs) + } + }) + } +} diff --git a/docs/CHANGELOG/CHANGELOG-1.10.md b/docs/CHANGELOG/CHANGELOG-1.10.md index 035a0aef0dae..6d5a26109282 100644 --- a/docs/CHANGELOG/CHANGELOG-1.10.md +++ b/docs/CHANGELOG/CHANGELOG-1.10.md @@ -2,30 +2,42 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* -- [v1.10.4](#v1104) - - [Downloads for v1.10.4](#downloads-for-v1104) - - [Changelog since v1.10.3](#changelog-since-v1103) +- [v1.10.6](#v1106) + - [Downloads for v1.10.6](#downloads-for-v1106) + - [Changelog since v1.10.5](#changelog-since-v1105) - [Changes by Kind](#changes-by-kind) - [Bug Fixes](#bug-fixes) - [Others](#others) -- [v1.10.3](#v1103) - - [Downloads for v1.10.3](#downloads-for-v1103) - - 
[Changelog since v1.10.2](#changelog-since-v1102) +- [v1.10.5](#v1105) + - [Downloads for v1.10.5](#downloads-for-v1105) + - [Changelog since v1.10.4](#changelog-since-v1104) - [Changes by Kind](#changes-by-kind-1) - [Bug Fixes](#bug-fixes-1) - [Others](#others-1) -- [v1.10.2](#v1102) - - [Downloads for v1.10.2](#downloads-for-v1102) - - [Changelog since v1.10.1](#changelog-since-v1101) +- [v1.10.4](#v1104) + - [Downloads for v1.10.4](#downloads-for-v1104) + - [Changelog since v1.10.3](#changelog-since-v1103) - [Changes by Kind](#changes-by-kind-2) - [Bug Fixes](#bug-fixes-2) - [Others](#others-2) -- [v1.10.1](#v1101) - - [Downloads for v1.10.1](#downloads-for-v1101) - - [Changelog since v1.10.0](#changelog-since-v1100) +- [v1.10.3](#v1103) + - [Downloads for v1.10.3](#downloads-for-v1103) + - [Changelog since v1.10.2](#changelog-since-v1102) - [Changes by Kind](#changes-by-kind-3) - [Bug Fixes](#bug-fixes-3) - [Others](#others-3) +- [v1.10.2](#v1102) + - [Downloads for v1.10.2](#downloads-for-v1102) + - [Changelog since v1.10.1](#changelog-since-v1101) + - [Changes by Kind](#changes-by-kind-4) + - [Bug Fixes](#bug-fixes-4) + - [Others](#others-4) +- [v1.10.1](#v1101) + - [Downloads for v1.10.1](#downloads-for-v1101) + - [Changelog since v1.10.0](#changelog-since-v1100) + - [Changes by Kind](#changes-by-kind-5) + - [Bug Fixes](#bug-fixes-5) + - [Others](#others-5) - [v1.10.0](#v1100) - [Downloads for v1.10.0](#downloads-for-v1100) - [What's New](#whats-new) @@ -34,7 +46,7 @@ - [Other Notable Changes](#other-notable-changes) - [API Changes](#api-changes) - [Deprecation](#deprecation) - - [Bug Fixes](#bug-fixes-4) + - [Bug Fixes](#bug-fixes-6) - [Security](#security) - [Features & Enhancements](#features--enhancements) - [Other](#other) @@ -45,6 +57,36 @@ +# v1.10.6 +## Downloads for v1.10.6 + +Download v1.10.6 in the [v1.10.6 release page](https://github.com/karmada-io/karmada/releases/tag/v1.10.6). + +## Changelog since v1.10.5 +### Changes by Kind +#### Bug Fixes +- `karmada-aggregated-apiserver`: User can append a "/" at the end when configuring the cluster's apiEndpoint. ([#5556](https://github.com/karmada-io/karmada/pull/5556), @spiritNO1) +- `karmada-controller-manager`: Ignored StatefulSet Dependencies with PVCs created via the VolumeClaimTemplates. ([#5687](https://github.com/karmada-io/karmada/pull/5687), @seanlaii) +- `karmada-scheduler`: Fixed unexpected modification of original `ResourceSummary` due to lack of deep copy. ([#5725](https://github.com/karmada-io/karmada/pull/5725), @RainbowMango) +- `karmada-scheduler`: Fixes an issue where resource model grades were incorrectly matched based on resource requests. Now only grades that can provide sufficient resources will be selected. ([#5729](https://github.com/karmada-io/karmada/pull/5729), @RainbowMango) +- `karmada-search`: Modify the logic of checking whether the resource is registered when selecting the plugin. ([#5736](https://github.com/karmada-io/karmada/pull/5736), @seanlaii) + +#### Others +None. + +# v1.10.5 +## Downloads for v1.10.5 + +Download v1.10.5 in the [v1.10.5 release page](https://github.com/karmada-io/karmada/releases/tag/v1.10.5). + +## Changelog since v1.10.4 +### Changes by Kind +#### Bug Fixes +- `karmada-controller-manager`: Fixed the error of cluster status old condition update will overwrite the newest condition. ([#5401](https://github.com/karmada-io/karmada/pull/5401), @XiShanYongYe-Chang) + +#### Others +- The base image `alpine` now has been promoted from `alpine:3.20.2` to `alpine:3.20.3`. 
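As an aside on the `ResourceSummary` deep-copy fix listed above (#5725): the bug class is plain Go map aliasing. A self-contained toy sketch of the before/after pattern, not the actual patch (type and field names are simplified stand-ins for Karmada's cluster/v1alpha1 API):

```go
package main

import "fmt"

// ResourceSummary stands in for the scheduler's cached per-cluster summary.
type ResourceSummary struct {
	Allocatable map[string]int64 // resource name -> quantity (e.g. CPU millicores)
}

// DeepCopy returns an independent copy, like Karmada's generated DeepCopy methods.
func (r *ResourceSummary) DeepCopy() *ResourceSummary {
	out := &ResourceSummary{Allocatable: make(map[string]int64, len(r.Allocatable))}
	for k, v := range r.Allocatable {
		out.Allocatable[k] = v
	}
	return out
}

func main() {
	cached := &ResourceSummary{Allocatable: map[string]int64{"cpu": 8000}}

	// Buggy pattern: "scratch := cached" aliases the cached map, so
	// subtracting a request would silently corrupt the shared summary.
	// Fixed pattern: copy before mutating.
	scratch := cached.DeepCopy()
	scratch.Allocatable["cpu"] -= 500

	fmt.Println(cached.Allocatable["cpu"], scratch.Allocatable["cpu"]) // 8000 7500
}
```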
+ # v1.10.4 ## Downloads for v1.10.4 diff --git a/docs/CHANGELOG/CHANGELOG-1.11.md b/docs/CHANGELOG/CHANGELOG-1.11.md new file mode 100644 index 000000000000..68043abadd66 --- /dev/null +++ b/docs/CHANGELOG/CHANGELOG-1.11.md @@ -0,0 +1,330 @@ + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [v1.11.2](#v1112) + - [Downloads for v1.11.2](#downloads-for-v1112) + - [Changelog since v1.11.1](#changelog-since-v1111) + - [Changes by Kind](#changes-by-kind) + - [Bug Fixes](#bug-fixes) + - [Others](#others) +- [v1.11.1](#v1111) + - [Downloads for v1.11.1](#downloads-for-v1111) + - [Changelog since v1.11.0](#changelog-since-v1110) + - [Changes by Kind](#changes-by-kind-1) + - [Bug Fixes](#bug-fixes-1) + - [Others](#others-1) +- [v1.11.0](#v1110) + - [Downloads for v1.11.0](#downloads-for-v1110) + - [What's New](#whats-new) + - [Cluster-Level Resource Propagation Pause and Resume](#cluster-level-resource-propagation-pause-and-resume) + - [Karmadactl Offers More Advanced Features](#karmadactl-offers-more-advanced-features) + - [Consistent generation semantics for multi-cluster workloads](#consistent-generation-semantics-for-multi-cluster-workloads) + - [Karmada Operator Supports Custom CRD Download Strategy](#karmada-operator-supports-custom-crd-download-strategy) + - [Other Notable Changes](#other-notable-changes) + - [API Changes](#api-changes) + - [Deprecation](#deprecation) + - [Bug Fixes](#bug-fixes-2) + - [Security](#security) + - [Features & Enhancements](#features--enhancements) + - [Other](#other) + - [Dependencies](#dependencies) + - [Helm Charts](#helm-charts) + - [Instrumentation](#instrumentation) + - [Contributors](#contributors) + + + +# v1.11.2 +## Downloads for v1.11.2 + +Download v1.11.2 in the [v1.11.2 release page](https://github.com/karmada-io/karmada/releases/tag/v1.11.2). + +## Changelog since v1.11.1 +### Changes by Kind +#### Bug Fixes +- `karmada-controller-manager`: Ignored StatefulSet Dependencies with PVCs created via the VolumeClaimTemplates. ([#5686](https://github.com/karmada-io/karmada/pull/5686), @seanlaii) +- `karmada-scheduler`: Fixed unexpected modification of original `ResourceSummary` due to lack of deep copy. ([#5724](https://github.com/karmada-io/karmada/pull/5724), @RainbowMango) +- `karmada-scheduler`: Fixes an issue where resource model grades were incorrectly matched based on resource requests. Now only grades that can provide sufficient resources will be selected. ([#5728](https://github.com/karmada-io/karmada/pull/5728), @RainbowMango) +- `karmada-search`: Modify the logic of checking whether the resource is registered when selecting the plugin. ([#5737](https://github.com/karmada-io/karmada/pull/5737), @seanlaii) + +#### Others +None. + +# v1.11.1 +## Downloads for v1.11.1 + +Download v1.11.1 in the [v1.11.1 release page](https://github.com/karmada-io/karmada/releases/tag/v1.11.1). + +## Changelog since v1.11.0 +### Changes by Kind +#### Bug Fixes +- `karmada-operator`: Fixed the issue where the manifests for the `karmada-scheduler` and `karmada-descheduler` components were not parsed correctly. ([#5550](https://github.com/karmada-io/karmada/pull/5550) @zhzhuang-zju) +- `karmadactl`ļ¼šFixed the issue where commands `create`, `annotate`, `delete`, `edit`, `label`, and `patch` cannot specify the namespace flag. ([#5513](https://github.com/karmada-io/karmada/pull/5513) @zhzhuang-zju) +- `karmadactl`: Fixed the issue that karmadactl addon failed to install karmada-scheduler-estimator due to unknown flag. 
([#5538](https://github.com/karmada-io/karmada/pull/5538) @chaosi-zju) + +#### Others +- The base image `alpine` now has been promoted from `alpine:3.20.2` to `alpine:3.20.3`. +- Karmada(release-1.11) now using Golang v1.22.7. ([#5531](https://github.com/karmada-io/karmada/pull/5531) @RainbowMango) + +# v1.11.0 +## Downloads for v1.11.0 + +Download v1.11.0 in the [v1.11.0 release page](https://github.com/karmada-io/karmada/releases/tag/v1.11.0). + +## What's New + +### Cluster-Level Resource Propagation Pause and Resume + +This release provides a capability that supports the pause and resume of resource propagation at the cluster granularity, bringing more possibilities to developing, operating, and maintaining the system. + +In some scenarios, Karmada users would like to control the timing of the synchronization of the above resource changes themselves, such as: + +* As a developer, when the Karmada control plane competes with member clusters for control of resources, there is a situation where resources are repeatedly updated. Pausing the process of synchronizing the resource to the member clusters would be helpful to quickly locate the problem. + +* As a release manager, this feature allows for control over which clusters receive application updates, thus achieving a rolling update cluster by cluster. + +With the cluster-level resource propagation pause and resume capabilities, users will be able to better control the propagation of resources. + +For a detailed description of this feature, see the [User Guide](https://karmada.io/docs/next/userguide/scheduling/resource-propagating/#suspend-and-resume-of-resource-propagation). + +(Feature contributors: @a7i, @XiShanYongYe-Chang) + +### Karmadactl Offers More Advanced Features + +In this release, the Karmada community is dedicated to enhancing Karmadactl capabilities and improving its functionalities from multiple perspectives. + +- Richer command set + +Karmadactl has implemented new commands, such as `create`, `patch`, `delete`, `label`, `annotate`, `edit`, `attach`, `top node`,`api-resources` and `explain`, which allow users to perform more operations on resources in the Karmada control plane or member clusters. + +- Richer capabilities + +Karmadactl introduces the `--operation-scope` flag to control the scope of command operations. With the new flag, the commands `get`, `describe`, `exec`, and `explain` can operate on resources in the Karmada control plane or member clusters. + +- More detailed command output information + +The output of the `karmadactl get cluster` command now add the information of `Zones`, `Region`, `Provider`, `API-Endpoint`, and `Proxy-URL`. + +With these capability enhancements, the operational experience of `karmadactl` could be improved. The new capabilities and more information about `karmadactl` can be obtained using `karmadactl --help`. + +(Feature contributors: @hulizhe, @zhzhuang-zju, @whitewindmills, @a7i, @guozheng-shen, @grosser) + +### Consistent generation semantics for multi-cluster workloads + +In this release, Karmada has introduced consistent generation semantics for workloads running on multiple clusters. This update provides a reliable reference for release systems, enhancing the precision of multi-cluster deployments. By standardizing the generation semantics, Karmada simplifies the release process and ensures that workload statuses are consistently tracked, making it easier to manage and monitor applications across multiple clusters. + +The following resource adaptations have been completed. 
+
+- GroupVersion: apps/v1
+  Kind: Deployment, DaemonSet, StatefulSet
+
+- GroupVersion: apps.kruise.io/v1alpha1
+  Kind: CloneSet, DaemonSet
+
+- GroupVersion: apps.kruise.io/v1beta1
+  Kind: StatefulSet
+
+- GroupVersion: helm.toolkit.fluxcd.io/v2beta1
+  Kind: HelmRelease
+
+- GroupVersion: kustomize.toolkit.fluxcd.io/v1
+  Kind: Kustomization
+
+- GroupVersion: source.toolkit.fluxcd.io/v1
+  Kind: GitRepository
+
+- GroupVersion: source.toolkit.fluxcd.io/v1beta2
+  Kind: Bucket, HelmChart, HelmRepository, OCIRepository
+
+For a detailed description of this feature, see the [issue](https://github.com/karmada-io/karmada/issues/4870).
+
+(Feature contributors: @yike21, @veophi, @whitewindmills, @liangyuanpeng, @zhy76)
+
+### Karmada Operator Supports Custom CRD Download Strategy
+
+CRD (Custom Resource Definition) resources are crucial prerequisite resources used by the Karmada operator for provisioning a new Karmada instance.
+In this release, the Karmada operator supports a custom CRD download strategy. With it, users can specify the download path for CRD resources and define more download strategies, providing richer and more configurable CRD download capabilities.
+
+For a detailed description of this feature, see the [Proposal: Custom CRD Download Strategy Support for Karmada Operator](https://github.com/karmada-io/karmada/tree/master/docs/proposals/operator-custom-crd-download-strategy)
+
+(Feature contributors: @jabellard)
+
+## Other Notable Changes
+### API Changes
+- Introduced `Suspension` to the `PropagationPolicy/ClusterPropagationPolicy` API to provide cluster-level resource propagation pause and resume capabilities (see the sketch below). ([#4838](https://github.com/karmada-io/karmada/pull/4838), @a7i)
+- Introduced `Dispatching` condition to the `Work` API to represent the dispatching status. ([#5317](https://github.com/karmada-io/karmada/pull/5317), @a7i)
+- `ResourceInterpreterCustomization`: Added two additional printer columns, TARGET-API-VERSION and TARGET-KIND, to represent the target resource type; these columns will be displayed in the output of kubectl get. ([#5077](https://github.com/karmada-io/karmada/pull/5077), @a7i)
+- `PropagationPolicy`/`ClusterPropagationPolicy`: Added two additional printer columns, `Conflict-Resolution` and `Priority`, to represent the conflict resolution strategy and priority; these columns will be displayed in the output of kubectl get. ([#5077](https://github.com/karmada-io/karmada/pull/5077), @a7i)
+- Introduced `CRDTarball` to the `Karmada` API to support a custom CRD download strategy. ([#5185](https://github.com/karmada-io/karmada/pull/5185) @jabellard)
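+
+As an illustration of the `Suspension` API above, the following is a minimal sketch of a `PropagationPolicy` that pauses dispatching to member clusters. The resource selector and cluster names are hypothetical, and the exact field layout should be confirmed against the suspend-and-resume user guide linked in the What's New section; this is a sketch, not authoritative API documentation.
+
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: PropagationPolicy
+metadata:
+  name: nginx-propagation   # hypothetical policy name
+spec:
+  resourceSelectors:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: nginx           # hypothetical workload
+  placement:
+    clusterAffinity:
+      clusterNames:
+        - member1
+        - member2
+  # While dispatching is suspended, changes to the resource template are not
+  # synchronized to member clusters; dropping the field resumes propagation.
+  suspension:
+    dispatching: true
+```
+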
+### Deprecation
+- The following labels that were deprecated (replaced by `propagationpolicy.karmada.io/permanent-id` and `clusterpropagationpolicy.karmada.io/permanent-id`) at release-1.10 now have been removed:
+  * propagationpolicy.karmada.io/namespace
+  * propagationpolicy.karmada.io/name
+  * clusterpropagationpolicy.karmada.io/name
+- Specification of metrics and health probe port parameters. Karmada introduced the `--metrics-bind-address` and `--health-probe-bind-address` flags and deprecated the following flags. This is a compatible change as the default values remain unchanged from previous versions. (contributors: @whitewindmills, @seanlaii, @liangyuanpeng)
+  * The flags deprecated by `karmada-agent` are:
+    --bind-address
+    --secure-port
+  * The flags deprecated by `karmada-controller-manager` are:
+    --bind-address
+    --secure-port
+  * The flags deprecated by `karmada-descheduler` are:
+    --bind-address
+    --secure-port
+  * The flags deprecated by `karmada-scheduler` are:
+    --bind-address
+    --secure-port
+  * The flags deprecated by `karmada-scheduler-estimator` are:
+    --bind-address
+    --secure-port
+
+### Bug Fixes
+- `karmada-scheduler-estimator`: Fixed the issue that the `Unschedulable` result returned by plugins was treated as an exception. ([#5012](https://github.com/karmada-io/karmada/pull/5012), @mszacillo)
+- `karmada-controller-manager`: Fixed the issue that the cluster-status-controller overwrites the remedyActions field. ([#5030](https://github.com/karmada-io/karmada/pull/5030), @XiShanYongYe-Chang)
+- `karmada-controller-manager`: Fixed the issue that the default resource interpreter doesn't accurately interpret the numbers of replicas. ([#5095](https://github.com/karmada-io/karmada/pull/5095), @whitewindmills)
+- `karmada-controller-manager`: Fixed the issue of residual work in the MultiClusterService feature. ([#5188](https://github.com/karmada-io/karmada/pull/5188), @XiShanYongYe-Chang)
+- `karmada-controller-manager`: Fixed the issue that status aggregation against the resource template might be missed due to slow cache sync. ([#5318](https://github.com/karmada-io/karmada/pull/5318), @chaosi-zju)
+- `karmada-controller-manager`: Fixed the error that a cluster status update carrying an old condition would overwrite the newest condition. ([#5227](https://github.com/karmada-io/karmada/pull/5227), @XiShanYongYe-Chang)
+- `karmada-controller-manager`: Fixed work status sync when work dispatching is suspended. ([#5403](https://github.com/karmada-io/karmada/pull/5403), @a7i)
+- `karmada-aggregated-apiserver`: User can append a "/" at the end when configuring the cluster's apiEndpoint. ([#5432](https://github.com/karmada-io/karmada/pull/5432), @spiritNO1)
+- Correct `ClusterResourceBinding` scope in `MutatingWebhookConfiguration`. ([#5252](https://github.com/karmada-io/karmada/pull/5252), @a7i)
+
+### Security
+- `Security Enhancement`: Introduced a TLS certificate authentication mechanism to secure gRPC connections to `karmada-scheduler-estimator` (see the sketch below). ([#5040](https://github.com/karmada-io/karmada/pull/5040), @zhzhuang-zju)
+  * The flags added to `karmada-scheduler-estimator` are:
+    --grpc-auth-cert-file
+    --grpc-auth-key-file
+    --grpc-client-ca-file
+    --insecure-skip-grpc-client-verify
+  * The flags added to `karmada-scheduler` are:
+    --scheduler-estimator-ca-file
+    --scheduler-estimator-key-file
+    --scheduler-estimator-cert-file
+    --insecure-skip-estimator-verify
+  * The flags added to `karmada-descheduler` are:
+    --scheduler-estimator-ca-file
+    --scheduler-estimator-key-file
+    --scheduler-estimator-cert-file
+    --insecure-skip-estimator-verify
+  The added fields don't require any adaptation during the process of upgrading from a previous version of Karmada, but give an optional and recommended way to secure the gRPC connection.
+- In this release, Karmada has invested significant effort in enhancing Karmada maturity based on [Clomonitor check sets](https://clomonitor.io/docs/topics/checks/). So far, we have achieved a score of [99](https://clomonitor.io/projects/cncf/karmada), and the last check (Signed releases) will be passed after 5 releases. This indicates that the Karmada community has made great strides in both security and maturity. (contributors: @zhzhuang-zju, @B1F030, @adiya7302, @Akash-Singh04, @RainbowMango)
+- Introduced [SBOM](https://www.aquasec.com/cloud-native-academy/supply-chain-security/sbom/) to release assets to enhance transparency around Karmada components and dependencies and bolster the security posture of our project. The composition and usage of SBOM can refer to [SBOM DOC](https://karmada.io/docs/next/administrator/security/verify-artifacts#sbom). ([#5110](https://github.com/karmada-io/karmada/pull/5110), @zhzhuang-zju)
+- Introduced [SLSA provenance file](https://slsa.dev/spec/v0.1/) to release assets, with which Karmada users can verify the artifacts to prevent counterfeiting. ([#5178](https://github.com/karmada-io/karmada/pull/5178), @zhzhuang-zju)
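+
+To make the new gRPC authentication flags above concrete, here is a minimal, hypothetical sketch of how the scheduler side might be wired. The flag names are the ones listed above; the certificate paths and the way they are mounted are purely illustrative assumptions.
+
+```yaml
+# Illustrative excerpt from a karmada-scheduler Deployment spec.
+containers:
+  - name: karmada-scheduler
+    image: docker.io/karmada/karmada-scheduler:v1.11.0
+    command:
+      - /bin/karmada-scheduler
+      - --kubeconfig=/etc/kubeconfig
+      # Verify the estimator's serving cert and present a client cert:
+      - --scheduler-estimator-ca-file=/etc/karmada/pki/estimator/ca.crt
+      - --scheduler-estimator-cert-file=/etc/karmada/pki/estimator/tls.crt
+      - --scheduler-estimator-key-file=/etc/karmada/pki/estimator/tls.key
+```
+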
+### Features & Enhancements
+- `karmada-controller-manager`: Added work namespace/name annotation in the endpointslice resources to explain which work is associated. ([#5042](https://github.com/karmada-io/karmada/pull/5042), @XiShanYongYe-Chang)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of Deployment with `.metadata.generation` only when all members' statuses are aligned with its resource template generation (a sketch of this semantics follows this list). ([#4867](https://github.com/karmada-io/karmada/pull/4867), @veophi)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of Kustomization with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5084](https://github.com/karmada-io/karmada/pull/5084), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of StatefulSet with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5094](https://github.com/karmada-io/karmada/pull/5094), @zhy76)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of GitRepository with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5086](https://github.com/karmada-io/karmada/pull/5086), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of CloneSet with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5057](https://github.com/karmada-io/karmada/pull/5057), @veophi)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of DaemonSet with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5165](https://github.com/karmada-io/karmada/pull/5165), @whitewindmills)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of daemonsets.apps.kruise.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5167](https://github.com/karmada-io/karmada/pull/5167), @whitewindmills)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of StatefulSet with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5204](https://github.com/karmada-io/karmada/pull/5204), @liangyuanpeng)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of helmrepositories.source.toolkit.fluxcd.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5196](https://github.com/karmada-io/karmada/pull/5196), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of buckets.source.toolkit.fluxcd.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5193](https://github.com/karmada-io/karmada/pull/5193), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of ocirepositories.source.toolkit.fluxcd.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5197](https://github.com/karmada-io/karmada/pull/5197), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of helmcharts.source.toolkit.fluxcd.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5194](https://github.com/karmada-io/karmada/pull/5194), @yike21)
+- `karmada-controller-manager`: Mark `.status.observedGeneration` of helmreleases.helm.toolkit.fluxcd.io with `.metadata.generation` only when all members' statuses are aligned with its resource template generation. ([#5311](https://github.com/karmada-io/karmada/pull/5311), @yike21)
+- `karmada-controller-manager`: Add health probe argument `health-probe-bind-address`, and deprecate the `--bind-address` and `--secure-port` flags. ([#5290](https://github.com/karmada-io/karmada/pull/5290), @seanlaii)
+- `karmadactl`: Renamed join command options from --host-as-* to --karmada-as-*. ([#5099](https://github.com/karmada-io/karmada/pull/5099), @grosser)
+- `karmadactl`: Expose the metrics port for PodMonitor. ([#5169](https://github.com/karmada-io/karmada/pull/5169), @whitewindmills)
+- `karmadactl`: Add the reserved label `karmada.io/system` to resources created by the `join` command. ([#4620](https://github.com/karmada-io/karmada/pull/4620), @a7i)
+- `karmadactl`: Introduced `--ca-cert-file` and `--ca-key-file` flags to `init` command to specify the root CA which will be used to issue the certificate for components. ([#5127](https://github.com/karmada-io/karmada/pull/5127), @guozheng-shen)
+- `karmadactl`: The `get` command can show Karmada resources now. ([#5254](https://github.com/karmada-io/karmada/pull/5254), @hulizhe)
+- `karmadactl`: The `describe` command can show details of Karmada resources now. ([#5392](https://github.com/karmada-io/karmada/pull/5392), @hulizhe)
+- `karmadactl`: The `exec` command can execute a command in a Karmada container now. ([#5398](https://github.com/karmada-io/karmada/pull/5398), @hulizhe)
+- `karmadactl`: add new command `top node` to display resource (CPU/memory) usage of nodes in member clusters. ([#4224](https://github.com/karmada-io/karmada/pull/4224), @zhzhuang-zju)
+- `karmadactl`: add new command `create` to create a resource from a file or from stdin in the Karmada control plane. ([#5399](https://github.com/karmada-io/karmada/pull/5399), @hulizhe)
+- `karmadactl`: add new command `attach` to attach to a running container in the Karmada control plane or a member cluster. ([#5395](https://github.com/karmada-io/karmada/pull/5395), @hulizhe)
+- `karmadactl`: add new command `api-resources` to print the supported API resources on the server in the Karmada control plane or a member cluster. ([#5394](https://github.com/karmada-io/karmada/pull/5394), @hulizhe)
+- `karmadactl`: add new command `api-versions` to print the supported API versions on the server in the Karmada control plane or a member cluster. ([#5394](https://github.com/karmada-io/karmada/pull/5394), @hulizhe)
+- `karmadactl`: add new command `karmadactl explain` to describe fields and structure of various resources in the Karmada control plane or a member cluster. ([#5393](https://github.com/karmada-io/karmada/pull/5393), @hulizhe)
+- `karmadactl`: add new command `karmadactl delete` to delete resources. ([#5431](https://github.com/karmada-io/karmada/pull/5431), @zhzhuang-zju)
+- `karmadactl`: add new command `karmadactl edit` to edit a resource on the server. ([#5434](https://github.com/karmada-io/karmada/pull/5434), @zhzhuang-zju)
+- `karmadactl`: add new command `label` to update the labels on a resource. ([#5453](https://github.com/karmada-io/karmada/pull/5453), @zhzhuang-zju)
+- `karmadactl`: add new command `annotate` to update the annotations on a resource. ([#5458](https://github.com/karmada-io/karmada/pull/5458), @zhzhuang-zju)
+- `karmadactl`: add new command `patch` to update fields of a resource. ([#5463](https://github.com/karmada-io/karmada/pull/5463), @zhzhuang-zju)
+- `karmada-operator`: Introduced `--metrics-bind-address` and `--health-probe-bind-address` flags; it's a compatible change as the default value does not change from previous versions. ([#5174](https://github.com/karmada-io/karmada/pull/5174), @whitewindmills)
+- `karmada-operator`: Introduced a CRD download strategy that allows downloading CRDs from a private source. ([#5185](https://github.com/karmada-io/karmada/pull/5185), @jabellard)
+- `karmada-scheduler`: GroupClusters will sort clusters by score and availableReplica count. ([#5144](https://github.com/karmada-io/karmada/pull/5144), @mszacillo)
+- `karmada-scheduler`: Add health probe argument `health-probe-bind-address` and metrics argument `metrics-bind-address`. Deprecate the `--bind-address` and `--secure-port` flags. ([#5437](https://github.com/karmada-io/karmada/pull/5437), @liangyuanpeng)
+- `karmada-webhook`: changed the "app" label from mutating-config/validating-config to karmada-webhook to make them identifiable. ([#5246](https://github.com/karmada-io/karmada/pull/5246), @grosser)
+- `karmada-webhook`: Removed the 63-character name length limit for PropagationPolicy/ClusterPropagationPolicy resources. ([#5029](https://github.com/karmada-io/karmada/pull/5029), @XiShanYongYe-Chang)
+- `karmada-agent`: Add health probe argument `health-probe-bind-address`, and deprecate the `--bind-address` and `--secure-port` flags. ([#5223](https://github.com/karmada-io/karmada/pull/5223), @whitewindmills)
+- `karmada-scheduler-estimator`: Add health probe argument `health-probe-bind-address` and metrics argument `metrics-bind-address`. Deprecate the `--bind-address` and `--secure-port` flags. ([#5273](https://github.com/karmada-io/karmada/pull/5273), @seanlaii)
+- `karmada-descheduler`: Add health probe argument `health-probe-bind-address` and metrics argument `metrics-bind-address`. Deprecate the `--bind-address` and `--secure-port` flags. ([#5435](https://github.com/karmada-io/karmada/pull/5435), @whitewindmills)
+- Expose the metrics port for the karmada-controller-manager, scheduler, agent, karmada-webhook, descheduler and scheduler-estimator in local-up-karmada. ([#5428](https://github.com/karmada-io/karmada/pull/5428), @dzcvxe)
+- Expose the metrics port for the karmada-controller-manager, scheduler, karmada-webhook and descheduler in operator installation. ([#5465](https://github.com/karmada-io/karmada/pull/5465), @chaosi-zju)
+- Expose the default port for the karmada-controller-manager, scheduler and agent when creating a PodMonitor. ([#5139](https://github.com/karmada-io/karmada/pull/5139), @wangxf1987)
+- Added generic handling of priorityclass and namespace for the default flinkdeployment interpreter. ([#5215](https://github.com/karmada-io/karmada/pull/5215), @mszacillo)
+- add karmada.io/system=true label to newly created karmada-es-* namespaces. ([#5243](https://github.com/karmada-io/karmada/pull/5243), @grosser)
+- add karmada.io/system=true label to internally created karmada cluster-roles and cluster-role-bindings. ([#5281](https://github.com/karmada-io/karmada/pull/5281), @grosser)
+- Introduced cluster-level resource propagation pause and resume capabilities. ([#4838](https://github.com/karmada-io/karmada/pull/4838), @a7i)
+- Added the FlinkDeployment v1beta1 CRD to supported third-party resource customizations. ([#5023](https://github.com/karmada-io/karmada/pull/5023), @mszacillo)
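+
+All of the `.status.observedGeneration` items above implement the consistent generation semantics described in What's New: the aggregated status on the resource template only advances once every member cluster has caught up. A rough, purely illustrative sketch (cluster names and numbers are hypothetical):
+
+```yaml
+# Deployment resource template in the Karmada control plane (illustrative).
+metadata:
+  generation: 3              # the spec has been revised three times
+status:
+  observedGeneration: 2      # stays at 2 until every member has observed 3
+# Collected member statuses (illustrative):
+#   member1: status.observedGeneration: 3   # finished rolling out
+#   member2: status.observedGeneration: 2   # still rolling out, holds back
+```
+
+A release system can therefore treat `generation == observedGeneration` on the template as "fully rolled out everywhere", just as it would for a single cluster.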
+
+## Other
+### Dependencies
+- Karmada is now built with Go1.22.4. ([#5015](https://github.com/karmada-io/karmada/pull/5015), @grosser)
+- karmada-apiserver and kube-controller-manager are using v1.28.9 by default. ([#5065](https://github.com/karmada-io/karmada/pull/5065), @liangyuanpeng)
+- The base image `alpine` now has been promoted from `alpine:3.20.0` to `alpine:3.20.1`.
+- karmada-apiserver and kube-controller-manager are using v1.29.6 by default. ([#5209](https://github.com/karmada-io/karmada/pull/5209), @liangyuanpeng)
+- The base image `alpine` now has been promoted from `alpine:3.20.1` to `alpine:3.20.2`.
+- Karmada is now built with Go1.22.6. ([#5335](https://github.com/karmada-io/karmada/pull/5335), @RainbowMango)
+
+### Helm Charts
+- helm installs karmada components in order, to reduce components crashing during the installation. ([#5010](https://github.com/karmada-io/karmada/pull/5010) @chaosi-zju)
+- set karmada-metrics-adapter image pull policy to karmadaImagePullPolicy, in order to keep it consistent with other components. ([#5113](https://github.com/karmada-io/karmada/pull/5113), @chaosi-zju)
+- expose metrics port for helm installation. ([#5168](https://github.com/karmada-io/karmada/pull/5168), @whitewindmills)
+- ignore the static-resource Pod in the post-install check. ([#5369](https://github.com/karmada-io/karmada/pull/5369), @iawia002)
+- Support custom cluster service CIDR in the helm chart. ([#5379](https://github.com/karmada-io/karmada/pull/5379), @iawia002)
+- Fix the creation condition of metrics-adapter related APIService. ([#5378](https://github.com/karmada-io/karmada/pull/5378), @iawia002)
+- fix the issue that the controller can't restart in helm because a dependent secret is not found. ([#5305](https://github.com/karmada-io/karmada/pull/5305), @chaosi-zju)
+- automatically clean up the static-resource Job after it completes. ([#5442](https://github.com/karmada-io/karmada/pull/5442), @iawia002)
+
+### Instrumentation
+- `karmada-controller-manager`: add metrics `recreate_resource_to_cluster` and `update_resource_to_cluster` for recreate/update resource events when syncing work status. ([#5247](https://github.com/karmada-io/karmada/pull/5247), @chaosi-zju)
+
+## Contributors
+Thank you to everyone who contributed to this release!
+ +Users whose commits are in this release (alphabetically by username) + +- @08AHAD +- @a7i +- @aditya7302 +- @Affan-7 +- @Akash-Singh04 +- @anujagrawal699 +- @B1F030 +- @chaosi-zju +- @dzcvxe +- @grosser +- @guozheng-shen +- @hulizhe +- @iawia002 +- @mohamedawnallah +- @mszacillo +- @NishantBansal2003 +- @jabellard +- @khanhtc1202 +- @liangyuanpeng +- @qinguoyi +- @RainbowMango +- @renxiangyu_yewu +- @seanlaii +- @spiritNO1 +- @tiansuo114 +- @varshith257 +- @veophi +- @wangxf1987 +- @whitewindmills +- @xiaoloongfang +- @XiShanYongYe-Chang +- @xovoxy +- @Yash Pandey +- @yike21 +- @zhy76 +- @zhzhuang-zju diff --git a/docs/CHANGELOG/CHANGELOG-1.12.md b/docs/CHANGELOG/CHANGELOG-1.12.md new file mode 100644 index 000000000000..9fabe2bf4872 --- /dev/null +++ b/docs/CHANGELOG/CHANGELOG-1.12.md @@ -0,0 +1,150 @@ + + +**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* + +- [v1.12.0-beta.0](#v1120-beta0) + - [Downloads for v1.12.0-beta.0](#downloads-for-v1120-beta0) + - [Changelog since v1.12.0-alpha.1](#changelog-since-v1120-alpha1) + - [Urgent Update Notes](#urgent-update-notes) + - [Changes by Kind](#changes-by-kind) + - [API Changes](#api-changes) + - [Features & Enhancements](#features--enhancements) + - [Deprecation](#deprecation) + - [Bug Fixes](#bug-fixes) + - [Security](#security) + - [Other](#other) + - [Dependencies](#dependencies) + - [Helm Charts](#helm-charts) + - [Instrumentation](#instrumentation) +- [v1.12.0-alpha.1](#v1120-alpha1) + - [Downloads for v1.12.0-alpha.1](#downloads-for-v1120-alpha1) + - [Changelog since v1.11.0](#changelog-since-v1110) + - [Urgent Update Notes](#urgent-update-notes-1) + - [Changes by Kind](#changes-by-kind-1) + - [API Changes](#api-changes-1) + - [Features & Enhancements](#features--enhancements-1) + - [Deprecation](#deprecation-1) + - [Bug Fixes](#bug-fixes-1) + - [Security](#security-1) + - [Other](#other-1) + - [Dependencies](#dependencies-1) + - [Helm Charts](#helm-charts-1) + - [Instrumentation](#instrumentation-1) + + + +# v1.12.0-beta.0 +## Downloads for v1.12.0-beta.0 + +Download v1.12.0-beta.0 in the [v1.12.0-beta.0 release page](https://github.com/karmada-io/karmada/releases/tag/v1.12.0-beta.0). + +## Changelog since v1.12.0-alpha.1 + +## Urgent Update Notes + +## Changes by Kind + +### API Changes +- Introduced `SecretRef` to `Karmada` API as part of the configuration for connecting to an external etcd cluster can be used to reference a secret that contains credentials for connecting to an external etcd cluster. ([#5699](https://github.com/karmada-io/karmada/pull/5699), @jabellard) + +### Features & Enhancements +- Standardize the naming of karmada secrets in local up installation method. ([#5423](https://github.com/karmada-io/karmada/pull/5423), @chaosi-zju) +- `karmada-scheduler-estimator`: grpc connection adds the support for custom DNS Domain. ([#5472](https://github.com/karmada-io/karmada/pull/5472), @zhzhuang-zju) +- `karmada-operator`: The new `SecretRef` field added as part of the configuration for connecting to an external etcd cluster can be used to reference a secret that contains credentials for connecting to an external etcd cluster. ([#5699](https://github.com/karmada-io/karmada/pull/5699), @jabellard) +- `karmada-operator`: Adds one-click script to install a Karmada instance through the `karmada-operator`. 
([#5519](https://github.com/karmada-io/karmada/pull/5519), @zhzhuang-zju)
+- `karmada-controller-manager`: keep preserveResourcesOnDeletion of the dependent resource consistent with that of the primary resource. ([#5717](https://github.com/karmada-io/karmada/pull/5717), @XiShanYongYe-Chang)
+- `karmada-controller-manager`: set conflictResolution for dependent resources. ([#4418](https://github.com/karmada-io/karmada/pull/4418), @chaunceyjiang)
+- `karmadactl`: `karmadactl init` supports deployment through configuration files. ([#5357](https://github.com/karmada-io/karmada/pull/5357), @tiansuo114)
+- `karmadactl`: new command `karmadactl unregister` supports unregistering a pull-mode cluster. ([#5626](https://github.com/karmada-io/karmada/pull/5626), @wulemao)
+- `karmada-scheduler`: implement group score calculation instead of taking the highest score among clusters. ([#5621](https://github.com/karmada-io/karmada/pull/5621), @ipsum-0320)
+
+### Deprecation
+- `ExternalEtcd.CAData`, `ExternalEtcd.CertData` and `ExternalEtcd.KeyData` in `Karmada` API are deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials. ([#5699](https://github.com/karmada-io/karmada/pull/5699), @jabellard)
+
+### Bug Fixes
+- `karmada-scheduler`: Fixed unexpected modification of original `ResourceSummary` due to lack of deep copy. ([#5685](https://github.com/karmada-io/karmada/pull/5685), @LivingCcj)
+- `karmada-scheduler`: Fixes an issue where resource model grades were incorrectly matched based on resource requests. Now only grades that can provide sufficient resources will be selected. ([#5706](https://github.com/karmada-io/karmada/pull/5706), @RainbowMango)
+- `karmada-search`: Modify the logic of checking whether the resource is registered when selecting the plugin. ([#5662](https://github.com/karmada-io/karmada/pull/5662), @yanfeng1992)
+
+### Security
+- `karmada-operator`: minimize the RBAC permissions for karmada-operator. ([#5586](https://github.com/karmada-io/karmada/pull/5586), @B1F030)
+- `local up`: add CA data to the generated karmada config for enhanced security. ([#5739](https://github.com/karmada-io/karmada/pull/5739), @chaosi-zju)
+
+## Other
+### Dependencies
+
+### Helm Charts
+
+### Instrumentation
+
+# v1.12.0-alpha.1
+## Downloads for v1.12.0-alpha.1
+
+Download v1.12.0-alpha.1 in the [v1.12.0-alpha.1 release page](https://github.com/karmada-io/karmada/releases/tag/v1.12.0-alpha.1).
+
+## Changelog since v1.11.0
+
+## Urgent Update Notes
+
+## Changes by Kind
+
+### API Changes
+- Introduced `extraVolumes` and `extraVolumeMounts` to the `Karmada` API to optionally specify extra volumes and volume mounts for the Karmada API server component. ([#5509](https://github.com/karmada-io/karmada/pull/5509), @jabellard)
+- Introduced a new condition `CompleteAPIEnablements` to represent the API collection status of clusters. ([#5400](https://github.com/karmada-io/karmada/pull/5400), @whitewindmills)
+- Introduced `PreserveResourcesOnDeletion` field to both PropagationPolicy and ClusterPropagationPolicy API, which provides the ability to roll back migration safely. ([#5575](https://github.com/karmada-io/karmada/pull/5575), @RainbowMango)
+- Introduced `FieldOverrider` to both OverridePolicy and ClusterOverridePolicy, which provides the ability to override structured data nested in manifests like ConfigMap or Secret (see the sketch below). ([#5581](https://github.com/karmada-io/karmada/pull/5581), @RainbowMango)
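+
+As a rough illustration of `FieldOverrider` (the ConfigMap name, field path, and JSON sub-path below are hypothetical, and the precise schema should be taken from the API introduced in the PRs above), an override policy could rewrite a single value nested inside a JSON-formatted ConfigMap key:
+
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: OverridePolicy
+metadata:
+  name: app-config-override   # hypothetical
+spec:
+  resourceSelectors:
+    - apiVersion: v1
+      kind: ConfigMap
+      name: app-config        # hypothetical
+  overrideRules:
+    - overriders:
+        fieldOverrider:
+          - fieldPath: /data/config.json   # the key holding structured data
+            json:
+              - subPath: /db/port          # path inside the parsed JSON
+                operator: replace
+                value: "5432"
+```
+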
+### Features & Enhancements
+- implement preserveResourcesOnDeletion to support migration rollback. ([#5597](https://github.com/karmada-io/karmada/pull/5597), @a7i)
+- Introduced `FieldOverrider` for overriding values in JSON and YAML. ([#5591](https://github.com/karmada-io/karmada/pull/5591), @sophiefeifeifeiya)
+- standardize the naming of karmada config in local up installation method. ([#5679](https://github.com/karmada-io/karmada/pull/5679), @chaosi-zju)
+- `karmadactl`: Implemented autocompletion for karmadactl to save a lot of typing. ([#5533](https://github.com/karmada-io/karmada/pull/5533), @zhzhuang-zju)
+- `karmadactl`: Added shorthand letter `s` to 'operation-scope' flags across commands. ([#5483](https://github.com/karmada-io/karmada/pull/5483), @ahorine)
+- `karmadactl`: `karmadactl init` supports multiple label selection ability with flag `EtcdNodeSelectorLabels`. ([#5321](https://github.com/karmada-io/karmada/pull/5321), @tiansuo114)
+- `karmadactl`: set `PreserveResourcesOnDeletion` by default in the auto-created propagation policy during the promotion process. ([#5601](https://github.com/karmada-io/karmada/pull/5601), @wulemao)
+- `karmada-scheduler`: The `scheduler-estimator-service-namespace` flag is introduced, which can be used to explicitly specify the namespace that should be used to discover scheduler estimator services. For backwards compatibility, when not explicitly set, the default value of `karmada-system` is retained. ([#5478](https://github.com/karmada-io/karmada/pull/5478), @jabellard)
+- `karmada-descheduler`: The `scheduler-estimator-service-namespace` flag is introduced, which can be used to explicitly specify the namespace that should be used to discover scheduler estimator services. For backwards compatibility, when not explicitly set, the default value of `karmada-system` is retained. ([#5478](https://github.com/karmada-io/karmada/pull/5478), @jabellard)
+- `karmada-controller-manager`: The health status of resources without ResourceInterpreter customization will be treated as healthy by default. ([#5530](https://github.com/karmada-io/karmada/pull/5530), @a7i)
+- `karmada-webhook`: validate fieldOverrider operation. ([#5671](https://github.com/karmada-io/karmada/pull/5671), @chaunceyjiang)
+
+### Deprecation
+- The following flags have been deprecated from release `v1.11.0` and now have been removed:
+  * `karmada-agent`: ([#5548](https://github.com/karmada-io/karmada/pull/5548), @whitewindmills)
+    --bind-address
+    --secure-port
+  * `karmada-controller-manager`: ([#5549](https://github.com/karmada-io/karmada/pull/5549), @whitewindmills)
+    --bind-address
+    --secure-port
+  * `karmada-scheduler-estimator`: ([#5555](https://github.com/karmada-io/karmada/pull/5555), @seanlaii)
+    --bind-address
+    --secure-port
+  * `karmada-scheduler`: ([#5551](https://github.com/karmada-io/karmada/pull/5551), @chaosi-zju)
+    --bind-address
+    --secure-port
+  * `karmada-descheduler`: ([#5552](https://github.com/karmada-io/karmada/pull/5552), @chaosi-zju)
+    --bind-address
+    --secure-port
+
+### Bug Fixes
+- `karmada-operator`: Fixed the issue where the manifests for the `karmada-scheduler` and `karmada-descheduler` components were not parsed correctly. ([#5546](https://github.com/karmada-io/karmada/pull/5546), @jabellard)
+- `karmada-operator`: Fixed the issue that `system:admin` cannot proxy to member clusters.
([#5572](https://github.com/karmada-io/karmada/pull/5572), @chaosi-zju)
+- `karmada-aggregated-apiserver`: limit the aggregated apiserver `*` URL to the GET HTTP method; previously a user could modify a member cluster's objects via `*` in the aggregated apiserver URL. ([#5430](https://github.com/karmada-io/karmada/pull/5430), @spiritNO1)
+- `karmada-scheduler`: skip the filter if the cluster is already in the list of scheduling results, even if the API is missed. ([#5216](https://github.com/karmada-io/karmada/pull/5216), @yanfeng1992)
+- `karmada-controller-manager`: Ignored StatefulSet Dependencies with PVCs created via the VolumeClaimTemplates. ([#5568](https://github.com/karmada-io/karmada/pull/5568), @jklaw90)
+- `karmada-controller-manager`: Clean up the residual annotations when resources are preempted by a PropagationPolicy from a ClusterPropagationPolicy. ([#5563](https://github.com/karmada-io/karmada/pull/5563), @zhzhuang-zju)
+- `karmada-controller-manager`: Fixed an issue that policy claim metadata might be lost during the rapid deletion and creation of PropagationPolicy(s)/ClusterPropagationPolicy(s). ([#5319](https://github.com/karmada-io/karmada/pull/5319), @zhzhuang-zju)
+- `karmadactl`: Fixed the issue where commands `create`, `annotate`, `delete`, `edit`, `label`, and `patch` cannot specify the namespace flag. ([#5487](https://github.com/karmada-io/karmada/pull/5487), @zhzhuang-zju)
+- `karmadactl`: Fixed the issue that karmadactl addon failed to install karmada-scheduler-estimator due to an unknown flag. ([#5523](https://github.com/karmada-io/karmada/pull/5523), @chaosi-zju)
+
+### Security
+
+## Other
+### Dependencies
+- `karmada-apiserver` and `kube-controller-manager` are using v1.30.4 by default. ([#5515](https://github.com/karmada-io/karmada/pull/5515), @liangyuanpeng)
+- The base image `alpine` now has been promoted from `alpine:3.20.2` to `alpine:3.20.3`.
+- Karmada is now using Golang v1.22.7. ([#5529](https://github.com/karmada-io/karmada/pull/5529), @yelshall)
+
+### Helm Charts
+- `Helm chart`: Added helm index for the v1.10.0 and v1.11.0 releases.
([#5579](https://github.com/karmada-io/karmada/pull/5579), @chaosi-zju) + +### Instrumentation diff --git a/docs/CHANGELOG/CHANGELOG-1.9.md b/docs/CHANGELOG/CHANGELOG-1.9.md index 998f12b3a027..2fa628ef3374 100644 --- a/docs/CHANGELOG/CHANGELOG-1.9.md +++ b/docs/CHANGELOG/CHANGELOG-1.9.md @@ -2,42 +2,54 @@ **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* -- [v1.9.6](#v196) - - [Downloads for v1.9.6](#downloads-for-v196) - - [Changelog since v1.9.5](#changelog-since-v195) +- [v1.9.8](#v198) + - [Downloads for v1.9.8](#downloads-for-v198) + - [Changelog since v1.9.7](#changelog-since-v197) - [Changes by Kind](#changes-by-kind) - [Bug Fixes](#bug-fixes) - [Others](#others) -- [v1.9.5](#v195) - - [Downloads for v1.9.5](#downloads-for-v195) - - [Changelog since v1.9.4](#changelog-since-v194) +- [v1.9.7](#v197) + - [Downloads for v1.9.7](#downloads-for-v197) + - [Changelog since v1.9.6](#changelog-since-v196) - [Changes by Kind](#changes-by-kind-1) - [Bug Fixes](#bug-fixes-1) - [Others](#others-1) -- [v1.9.4](#v194) - - [Downloads for v1.9.4](#downloads-for-v194) - - [Changelog since v1.9.3](#changelog-since-v193) +- [v1.9.6](#v196) + - [Downloads for v1.9.6](#downloads-for-v196) + - [Changelog since v1.9.5](#changelog-since-v195) - [Changes by Kind](#changes-by-kind-2) - [Bug Fixes](#bug-fixes-2) - [Others](#others-2) -- [v1.9.3](#v193) - - [Downloads for v1.9.3](#downloads-for-v193) - - [Changelog since v1.9.2](#changelog-since-v192) +- [v1.9.5](#v195) + - [Downloads for v1.9.5](#downloads-for-v195) + - [Changelog since v1.9.4](#changelog-since-v194) - [Changes by Kind](#changes-by-kind-3) - [Bug Fixes](#bug-fixes-3) - [Others](#others-3) -- [v1.9.2](#v192) - - [Downloads for v1.9.2](#downloads-for-v192) - - [Changelog since v1.9.1](#changelog-since-v191) +- [v1.9.4](#v194) + - [Downloads for v1.9.4](#downloads-for-v194) + - [Changelog since v1.9.3](#changelog-since-v193) - [Changes by Kind](#changes-by-kind-4) - [Bug Fixes](#bug-fixes-4) - [Others](#others-4) -- [v1.9.1](#v191) - - [Downloads for v1.9.1](#downloads-for-v191) - - [Changelog since v1.9.0](#changelog-since-v190) +- [v1.9.3](#v193) + - [Downloads for v1.9.3](#downloads-for-v193) + - [Changelog since v1.9.2](#changelog-since-v192) - [Changes by Kind](#changes-by-kind-5) - [Bug Fixes](#bug-fixes-5) - [Others](#others-5) +- [v1.9.2](#v192) + - [Downloads for v1.9.2](#downloads-for-v192) + - [Changelog since v1.9.1](#changelog-since-v191) + - [Changes by Kind](#changes-by-kind-6) + - [Bug Fixes](#bug-fixes-6) + - [Others](#others-6) +- [v1.9.1](#v191) + - [Downloads for v1.9.1](#downloads-for-v191) + - [Changelog since v1.9.0](#changelog-since-v190) + - [Changes by Kind](#changes-by-kind-7) + - [Bug Fixes](#bug-fixes-7) + - [Others](#others-7) - [v1.9.0](#v190) - [Downloads for v1.9.0](#downloads-for-v190) - [What's New](#whats-new) @@ -47,7 +59,7 @@ - [Other Notable Changes](#other-notable-changes) - [API Changes](#api-changes) - [Deprecation](#deprecation) - - [Bug Fixes](#bug-fixes-6) + - [Bug Fixes](#bug-fixes-8) - [Security](#security) - [Features & Enhancements](#features--enhancements) - [Other](#other) @@ -58,6 +70,36 @@ +# v1.9.8 +## Downloads for v1.9.8 + +Download v1.9.8 in the [v1.9.8 release page](https://github.com/karmada-io/karmada/releases/tag/v1.9.8). + +## Changelog since v1.9.7 +### Changes by Kind +#### Bug Fixes +- `karmada-aggregated-apiserver`: User can append a "/" at the end when configuring the cluster's apiEndpoint. 
([#5557](https://github.com/karmada-io/karmada/pull/5557), @spiritNO1) +- `karmada-controller-manager`: Ignored StatefulSet Dependencies with PVCs created via the VolumeClaimTemplates. ([#5688](https://github.com/karmada-io/karmada/pull/5688), @seanlaii) +- `karmada-scheduler`: Fixed unexpected modification of original `ResourceSummary` due to lack of deep copy. ([#5726](https://github.com/karmada-io/karmada/pull/5726), @RainbowMango) +- `karmada-scheduler`: Fixes an issue where resource model grades were incorrectly matched based on resource requests. Now only grades that can provide sufficient resources will be selected. ([#5730](https://github.com/karmada-io/karmada/pull/5730), @RainbowMango) +- `karmada-search`: Modify the logic of checking whether the resource is registered when selecting the plugin. ([#5735](https://github.com/karmada-io/karmada/pull/5735), @seanlaii) + +#### Others +None. + +# v1.9.7 +## Downloads for v1.9.7 + +Download v1.9.7 in the [v1.9.7 release page](https://github.com/karmada-io/karmada/releases/tag/v1.9.7). + +## Changelog since v1.9.6 +### Changes by Kind +#### Bug Fixes +- `karmada-controller-manager`: Fixed the error of cluster status old condition update will overwrite the newest condition. ([#5402](https://github.com/karmada-io/karmada/pull/5402), @XiShanYongYe-Chang) + +#### Others +- The base image `alpine` now has been promoted from `alpine:3.20.2` to `alpine:3.20.3`. + # v1.9.6 ## Downloads for v1.9.6 diff --git a/docs/proposals/cleanup-propagated-resources/README.md b/docs/proposals/cleanup-propagated-resources/README.md index 36dec6990c87..cadc0c5fa613 100644 --- a/docs/proposals/cleanup-propagated-resources/README.md +++ b/docs/proposals/cleanup-propagated-resources/README.md @@ -75,7 +75,7 @@ In kubefed's propose, the author suggest add a `BestEffort` strategy, reviewers #### Needless Strategy -Not need cleanup propagated resources when unjoining cluster. `Karmada` should use this strategy as default value, condsider the business risk. +Not need cleanup propagated resources when unjoining cluster. `Karmada` should use this strategy as default value, consider the business risk. #### Required Strategy diff --git a/docs/proposals/karmada-operator/api-server-extra-volumes-volume-mounts/README.md b/docs/proposals/karmada-operator/api-server-extra-volumes-volume-mounts/README.md new file mode 100644 index 000000000000..82571b3f85a6 --- /dev/null +++ b/docs/proposals/karmada-operator/api-server-extra-volumes-volume-mounts/README.md @@ -0,0 +1,73 @@ +--- +title: Support to Specify Extra Volumes and Volume Mounts for Karmada API Server Component +authors: +- "@jabellard" +reviewers: +- "@RainbowMango" +approvers: +- "@RainbowMango" + +creation-date: 2024-07-29 + +--- + +# Support to Specify Extra Volumes and Volume Mounts for Karmada API Server Component + + +## Summary + +This proposal aims to introduce the ability to specify extra volumes and volume mounts for the Karmada API server component when creating a Karmada instance managed by the Karmada operator. +This enhancement will extend the current configuration capabilities, allowing users to meet additional requirements such as having the ability to configure encryption at rest or to configure +custom authentication webhooks. + +## Motivation +Currently, the Karmada operator allows specifying extra arguments for the Karmada API server component, but lacks support for specifying extra volumes and volume mounts for it. 
+This limitation hinders certain use cases that require additional configurations for security and customization. For instance, as of today, it's not possible to configure a custom authentication +webhook nor encryption at rest. + +### Goals +- Enable users to specify extra volumes and volume mounts for Karmada API server component to unlock the following use cases: + - Ability to configure a custom authentication webhook + - Ability to configure encryption at rest + +It's important to note that although this support will unlock the aforementioned use cases, given that there is a lot that can be configured for the API server component by having the ability to specify not only extra args, but +also extra volumes and volume mounts, other potential use cases requiring similar configurations are also unlocked. + + +### Non-Goals +- Introducing changes to the core functionality of the Karmada API server component. +- Overhauling the existing configuration of the Karmada API server component beyond the scope of adding extra volumes and volume mounts. + + +## Proposal +Introduce the new optional fields `ExtraVolumes` and `ExtraVolumeMounts` within the Karmada API server component configuration. + +## Design Details + +Introduce the new optional fields `ExtraVolumes` and `ExtraVolumeMounts` within Karmada API server component configuration. +With these new fields, the configuration would look as follows: +```go +// KarmadaAPIServer holds settings to kube-apiserver component of the kubernetes. +// Karmada uses it as its own apiserver in order to provide Kubernetes-native APIs. +type KarmadaAPIServer struct { + // Other existing fields + + // ExtraVolumes specifies a list of extra volumes for the API server's pod + // To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance, + // the operator will automatically attach volumes for the API server pod needed to configure things such as TLS, + // SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability, + // there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction + // with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases. + // +optional + ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"` + + // ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container + // To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance, + // the operator will automatically mount volumes into the API server container needed to configure things such as TLS, + // SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability, + // there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction + // with ExtraArgs and ExtraVolumes can be used to fulfil those use cases. 
+ // +optional + ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"` +} +``` diff --git a/docs/proposals/karmada-operator/api-server-service-status/README.md b/docs/proposals/karmada-operator/api-server-service-status/README.md new file mode 100644 index 000000000000..731bacdb6e5f --- /dev/null +++ b/docs/proposals/karmada-operator/api-server-service-status/README.md @@ -0,0 +1,102 @@ +--- +title: Add API Server Service Information to `KarmadaStatus` +authors: +- "@jabellard" +reviewers: +- "@RainbowMango" +approvers: +- "@RainbowMango" + +creation-date: 2024-09-24 + +--- + +# Add API Server Service Information to `KarmadaStatus` + +## Summary + +This proposal aims to enhance `KarmadaStatus` by adding a new field that contains the name and port of the API server service for a Karmada control plane. This change will simplify the process of referencing an API server service, +eliminating the need to infer the service name and exposed client port. This is useful for higher-level operators that may need to perform additional tasks like creating an ingress resource to configure external traffic to the service once a Karmada instance has been provisioned. + +## Motivation + +When managing a Karmada instance, referencing its API server service (e.g., for creating an ingress resource) currently requires inferring the service name and exposed port from conventions. Relying on this method is brittle since it depends on internal implementation details that may change. + +By including the API server service information directly in the `KarmadaStatus` field, operators can directly reference the service name and exposed port, improving reliability and simplifying cluster management. + +### Goals + +- Add a new field to `KarmadaStatus` to store the API server service information. +- Ensure the API server service information is accessible to operators or higher-level systems. + +### Non-Goals + +- Modify the process of how the Karmada API server service is created. +- Affect backward compatibility of the existing `KarmadaStatus`. + +## Proposal + +The proposal is to introduce a new field, `APIServerService`, in `KarmadaStatus` to capture the name and port of the Karmada API server service. This would provide a more reliable method for referencing the service when creating additional resources, like `Ingress` objects. + + +### User Stories + +#### Story 1 +As an operator, I want to provision an Ingress for the Karmada API server without needing to guess or infer the service name and port. As an example, when using the Nginx +ingress controller, to configure ingress traffic to the API server running in the host cluster, I need to create an ingress resource like the following: +```yaml +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + annotations: + nginx.ingress.kubernetes.io/backend-protocol: HTTPS + nginx.ingress.kubernetes.io/ssl-passthrough: "true" + name: demo-karmada-apiserver + namespace: demo +spec: + ingressClassName: nginx + rules: + - host: demo-demo.karmada.example.com + http: + paths: + - backend: + service: + name: demo-karmada-apiserver + port: + number: 5443 + path: / + pathType: Prefix +``` + + +#### Story 2 +As an administrator, I want to ensure the control plane's API server service is reliably discoverable by systems provisioning resources on top of Karmada. + +### Risks and Mitigations + +This change introduces minimal risk as it is backward-compatible. The new field will be optional, and systems that do not need it can safely ignore it. 
+Testing should focus on ensuring that the Karmada operator correctly populates the new field in `KarmadaStatus`.
+
+## Design Details
+With the proposed changes, `KarmadaStatus` will have the following structure:
+
+```go
+// KarmadaStatus defines the most recently observed status of the Karmada.
+type KarmadaStatus struct {
+    // APIServerService reports the location of the Karmada API server service which
+    // can be used by third-party applications to discover the Karmada Service, e.g.
+    // expose the service outside the cluster by Ingress.
+    // +optional
+    APIServerService *APIServerService `json:"apiServerService,omitempty"`
+}
+
+// APIServerService tells the location of Karmada API server service.
+// Currently, it only includes the name of the service. The namespace
+// of the service is the same as the namespace of the current Karmada object.
+type APIServerService struct {
+    // Name represents the name of the Karmada API Server service.
+    // +required
+    Name string `json:"name"`
+}
+```
+The Karmada operator will need to be updated to populate the `APIServerService` field during its reconciliation process.
\ No newline at end of file
diff --git a/docs/proposals/karmada-operator/external-etcd-credentials-ref/README.md b/docs/proposals/karmada-operator/external-etcd-credentials-ref/README.md
new file mode 100644
index 000000000000..7868e420a02a
--- /dev/null
+++ b/docs/proposals/karmada-operator/external-etcd-credentials-ref/README.md
@@ -0,0 +1,92 @@
+---
+title: Enhance `ExternalEtcd` Configuration to Support Retrieving etcd Client Credentials from a Kubernetes Secret
+authors:
+- "@jabellard"
+reviewers:
+- "@RainbowMango"
+- "@zhzhuang-zju"
+approvers:
+- "@RainbowMango"
+- "@zhzhuang-zju"
+
+creation-date: 2024-07-26
+
+---
+
+# Enhance `ExternalEtcd` Configuration to Support Retrieving etcd Client Credentials from a Kubernetes Secret
+
+## Summary
+
+This proposal aims to enhance the `ExternalEtcd` configuration in the Karmada API by introducing support for loading external etcd client credentials from a Kubernetes secret.
+This change will improve the security and manageability of sensitive data, such as TLS certificates and private keys, by leveraging Kubernetes' native secret management capabilities.
+
+## Motivation
+
+The current implementation of the `ExternalEtcd` configuration requires the etcd client credentials, including the certificate private key, to be provided inline within the configuration.
+This approach poses security risks and can be cumbersome to manage, especially in CI/CD scenarios driven by GitOps. By enabling these credentials to be loaded from a secret, we can ensure that sensitive data is stored securely and managed more efficiently.
+Additionally, this enhancement is well-aligned with Kubernetes best practices for managing sensitive information and provides a more flexible and secure way to configure external etcd connections.
+
+### Goals
+
+- Add support for connecting to an external etcd cluster.
+- Enable the use of Kubernetes secrets for storing and retrieving external etcd connection credentials.
+- Improve the security of sensitive data by leveraging Kubernetes' native secret management capabilities.
+- Simplify the management of external etcd configurations.
+
+### Non-Goals
+
+- Modifying the internal etcd configuration.
+- Changing the existing inline configuration approach.
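+
+To make the intended end state concrete before diving into the design: a `Karmada` resource pointing at an external etcd cluster could reference the credentials secret roughly as sketched below. The field path under `spec.components.etcd.external` and the endpoint shown are illustrative assumptions; the authoritative field shape is given in the Design Details that follow.
+
+```yaml
+apiVersion: operator.karmada.io/v1alpha1
+kind: Karmada
+metadata:
+  name: tenant1-karmada
+  namespace: tenant1
+spec:
+  components:
+    etcd:
+      external:
+        endpoints:
+          - https://etcd.tenant1.svc.cluster.local:2379   # illustrative
+        # Reference the credentials secret instead of inlining caData,
+        # certData and keyData:
+        secretRef:
+          name: tenant1-etcd-credentials
+```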
+ +## Proposal + +Introduce a new field, `SecretRef`, within the `ExternalEtcd` configuration to reference a Kubernetes secret that contains the necessary etcd connection credentials. +The existing inline configuration fields will be preserved to maintain backward compatibility. However, given that they are currently unused, it would make sense to mark them as deprecated. + +## Design Details + +Introduce the new field `SecretRef` within the `ExternalEtcd` configuration: +```go +// ExternalEtcd describes an external etcd cluster. +type ExternalEtcd struct { + // Other, existing fields + + + // SecretRef references a Kubernetes secret containing the etcd connection credentials. + // The secret must contain the following data keys: + // ca.crt: The Certificate Authority (CA) certificate data. + // tls.crt: The TLS certificate data. + // tls.key: The TLS private key data. + // +optional + SecretRef *LocalSecretReference `json:"secretRef,omitempty"` +} +``` + +Type `LocalSecretReference` already exists in the API. + +### Secret Structure + +The Kubernetes secret referenced by `SecretRef` must contain the following data keys: + +- `ca.crt`: The Certificate Authority (CA) certificate data. +- `tls.crt`: The TLS certificate data. +- `tls.key`: The TLS private key data. + +Example: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: tenant1-etcd-credentials + namespace: tenant1 +type: Opaque +data: + ca.crt: + tls.crt: + tls.key: +``` + +### Control Plane Component Configurations + +Today, the Karmada operator stores the in-cluster etcd client credentials in a secret. That secret is then used as the source of a volume that is mounted into control plane component containers +that need to connect to etcd such as `karmada-apiserver`, `karmada-aggregated-apiserver` and `karmada-search`. The configuration for these components will be very similar when connecting to an external etcd cluster. \ No newline at end of file diff --git a/docs/proposals/karmadactl/init-configuration-file.md b/docs/proposals/karmadactl/init-configuration-file.md new file mode 100644 index 000000000000..b06cb6acb6d6 --- /dev/null +++ b/docs/proposals/karmadactl/init-configuration-file.md @@ -0,0 +1,581 @@ +--- +title: Karmadactl Supports Configuration File Method for Production-Grade Installation and Deployment +authors: + - "@tiansuo114" +reviewers: + - "@liangyuanpeng" + - "@zhzhuang-zju" + - "@XiShanYongYe-Chang" + +approvers: + - "@liangyuanpeng" + - "@zhzhuang-zju" + - "@XiShanYongYe-Chang" + +creation-date: 2024-07-29 +--- + +# Karmadactl Supports Configuration File Method for Production-Grade Installation and Deployment + +## Summary + +The `karmadactl init` command is expected to support loading deployment parameters from a configuration file, simplifying the process of production-grade installation and reducing the complexity of managing numerous command-line flags. This enhancement allows users to streamline their multi-cluster management setups by utilizing predefined configuration files, similar to Kubernetes' `kubeadm`, making it easier to customize and maintain consistent deployments across environments. + +## Motivation + +As Karmada is widely used in multi-cluster management, the current `karmadactl init` command has accumulated dozens of command-line parameters. This not only increases the learning curve for users but also reduces the convenience of usage. 
With the ongoing development of the community, the number and complexity of these parameters are likely to continue growing, further impacting the user experience.
+
+### Goals
+
+- Provide the ability for `karmadactl` to construct `init` command parameters from a configuration file.
+
+### Non-Goals
+- Adding additional commands to `karmadactl config`, such as a `karmadactl config --init-config` command that outputs a default initialization configuration.
+
+## Proposal
+
+I believe that the flags in `karmadactl init` can mainly be divided into the following categories:
+
+- Certificate-related Options
+- Etcd-related Options
+- Karmada Control Plane-related Options
+- Kubernetes-related Options
+- Networking Options
+- Image Options
+
+Therefore, the configuration file can be designed around these categories, structuring the YAML accordingly and incorporating the existing configuration items into them. Additionally, similar to the commonly used `ExtraArgs` fields in Kubernetes structures, an extra field can be added to store other general fields. When designing the fields, it is advisable to follow the field names in the kubeadm configuration file as much as possible, to facilitate user familiarity and quick adoption.
+
+### User Stories (Optional)
+
+#### User Story 1: Simplified Cluster Deployment
+
+As a cloud platform operations engineer, Mr. Wang is responsible for managing multiple Karmada clusters for his company. Due to the large number of parameters that need to be set for each Karmada deployment, he finds the command line too complex and prone to errors. To improve deployment efficiency, Mr. Wang wants to use a predefined configuration file and streamline the cluster deployment process using the `karmadactl init --config` command.
+
+Now that Karmada supports loading deployment parameters from a configuration file, Mr. Wang only needs to create a configuration file and apply the parameters, reducing the complexity of manual command-line input and ensuring consistent deployments across different environments. This way, Mr. Wang can easily reuse the same configuration file, significantly simplifying his workflow.
+
+### Notes/Constraints/Caveats (Optional)
+
+### Risks and Mitigations
+
+## Design Details
+
+
+
+### CLI flags changes
+
+This proposal adds a new flag, `config`, to the `karmadactl init` flag set.
+
+| name   | shorthand | default | usage                             |
+|--------|:---------:|---------|-----------------------------------|
+| config |     /     | ""      | karmadactl init --config= |
+
+With this flag, we will:
+
+- When the user has a `karmadactl init` configuration file, they can use the `--config` flag to specify that `karmadactl init` should read the configuration from the specified config file. Additionally, external command-line flags can still be used. In the design, if the two overlap, the configuration file items take higher priority.
+
+#### Usage Examples:
+
+1. ##### Using a Default Configuration File for Karmada Initialization
+
+   Assume that the user has prepared a default `karmada-init.yaml` configuration file that contains basic parameters for Karmada deployment, such as control plane components, etcd configuration, and image repositories.
The user can start the Karmada initialization process with the following command: + + ``` + karmadactl init --config karmada-init.yaml + ``` + + yaml example + + ``` + apiVersion: config.karmada.io/v1alpha1 + kind: InitConfiguration + general: + namespace: "karmada-system" + kubeConfigPath: "/etc/karmada/kubeconfig" + kubeImageTag: "v1.21.0" + privateImageRegistry: "registry.k8s.io" + port: 32443 + karmadaControlPlane: + apiServer: + replicas: 3 + etcd: + local: + replicas: 3 + ``` + + If we configure without using a configuration file and rely solely on command-line flags, the command would look like this: + + ``` + karmadactl init \ + --namespace=karmada-system \ + --kubeconfig=/etc/karmada/kubeconfig \ + --kube-image-tag=v1.21.0 \ + --private-image-registry=registry.k8s.io \ + --port=32443 \ + --karmada-apiserver-replicas=3 \ + --etcd-replicas=3 + ``` + +2. ##### Specifying a Private Image Registry for Offline Deployment + + If a user is deploying Karmada in an offline environment, they may need to pull images from an internal private image registry. In this case, the user can specify the `privateImageRegistry` parameter in the configuration file and load it using the `--config` option. + + yaml example + + ``` + apiVersion: config.karmada.io/v1alpha1 + kind: InitConfiguration + general: + namespace: "karmada-system" + kubeConfigPath: "/etc/karmada/kubeconfig" + privateImageRegistry: "registry.company.com" + karmadaControlPlane: + apiServer: + replicas: 3 + etcd: + local: + replicas: 3 + ``` + + `karmadactl init` will pull all required Karmada images from the private image registry `registry.company.com`, ensuring that the deployment can proceed smoothly even in an offline or restricted network environment. + +### Implementation of Loading Configuration File + +Based on my classification of dozens of flags in the current `karmadactl init` command, I have designed the following data structure: + +```go +// KarmadaInitConfig defines the configuration for initializing Karmada +type KarmadaInitConfig struct { + metav1.TypeMeta `json:",inline" yaml:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" yaml:"metadata,omitempty"` + + // Spec defines the desired state for initializing Karmada + // +optional + Spec KarmadaInitSpec `json:"spec,omitempty" yaml:"spec,omitempty"` +} + +// KarmadaInitSpec is the specification part of KarmadaInitConfig, containing all configurable options +type KarmadaInitSpec struct { + // Certificates configures the certificate information required by Karmada + // +optional + Certificates Certificates `json:"certificates,omitempty" yaml:"certificates,omitempty"` + + // Etcd configures the information of the Etcd cluster + // +optional + Etcd Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"` + + // ExternalEtcd configures the information of an external Etcd cluster + // +optional + ExternalEtcd *ExternalEtcd `json:"externalEtcd,omitempty" yaml:"externalEtcd,omitempty"` + + // HostCluster configures the information of the host cluster + // +optional + HostCluster HostCluster `json:"hostCluster,omitempty" yaml:"hostCluster,omitempty"` + + // Images configures image-related information + // +optional + Images Images `json:"images,omitempty" yaml:"images,omitempty"` + + // Components configures information about Karmada components + // +optional + Components KarmadaComponents `json:"components,omitempty" yaml:"components,omitempty"` + + // Networking configures network-related information + // +optional + Networking Networking `json:"networking,omitempty" 
yaml:"networking,omitempty"` + + // KarmadaDataPath configures the data directory for Karmada + // +optional + KarmadaDataPath string `json:"karmadaDataPath,omitempty" yaml:"karmadaDataPath,omitempty"` + + // KarmadaPkiPath configures the PKI directory for Karmada + // +optional + KarmadaPkiPath string `json:"karmadaPkiPath,omitempty" yaml:"karmadaPkiPath,omitempty"` + + // WaitComponentReadyTimeout configures the timeout (in seconds) for waiting for components to be ready + // +optional + WaitComponentReadyTimeout int `json:"waitComponentReadyTimeout,omitempty" yaml:"waitComponentReadyTimeout,omitempty"` +} + +// Certificates defines the configuration related to certificates +type Certificates struct { + // CACertFile is the path to the root CA certificate file + // +optional + CACertFile string `json:"caCertFile,omitempty" yaml:"caCertFile,omitempty"` + + // CAKeyFile is the path to the root CA key file + // +optional + CAKeyFile string `json:"caKeyFile,omitempty" yaml:"caKeyFile,omitempty"` + + // ExternalDNS is the list of external DNS names for the certificate + // +optional + ExternalDNS []string `json:"externalDNS,omitempty" yaml:"externalDNS,omitempty"` + + // ExternalIP is the list of external IPs for the certificate + // +optional + ExternalIP []string `json:"externalIP,omitempty" yaml:"externalIP,omitempty"` + + // ValidityPeriod is the validity period of the certificate + // +optional + ValidityPeriod metav1.Duration `json:"validityPeriod,omitempty" yaml:"validityPeriod,omitempty"` +} + +// Etcd defines the configuration of the Etcd cluster +type Etcd struct { + // Local indicates using a local Etcd cluster + // +optional + Local *LocalEtcd `json:"local,omitempty" yaml:"local,omitempty"` + + // External indicates using an external Etcd cluster + // +optional + External *ExternalEtcd `json:"external,omitempty" yaml:"external,omitempty"` +} + +// LocalEtcd defines the configuration of a local Etcd cluster +type LocalEtcd struct { + // CommonSettings contains common settings like image and resources + CommonSettings `json:",inline" yaml:",inline"` + + // DataPath is the data storage path for Etcd + // +optional + DataPath string `json:"dataPath,omitempty" yaml:"dataPath,omitempty"` + + // InitImage is the image for the Etcd init container + // +optional + InitImage Image `json:"initImage,omitempty" yaml:"initImage,omitempty"` + + // NodeSelectorLabels are the node selector labels for the Etcd pods + // +optional + NodeSelectorLabels map[string]string `json:"nodeSelectorLabels,omitempty" yaml:"nodeSelectorLabels,omitempty"` + + // PVCSize is the size of the PersistentVolumeClaim for Etcd + // +optional + PVCSize string `json:"pvcSize,omitempty" yaml:"pvcSize,omitempty"` + + // Replicas is the number of replicas in the Etcd cluster + // +optional + Replicas int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // StorageMode is the storage mode for Etcd (e.g., emptyDir, hostPath, PVC) + // +optional + StorageMode string `json:"storageMode,omitempty" yaml:"storageMode,omitempty"` +} + +// ExternalEtcd defines the configuration of an external Etcd cluster +type ExternalEtcd struct { + // Endpoints are the server addresses of the external Etcd cluster + // +required + Endpoints []string `json:"endpoints" yaml:"endpoints"` + + // CAFile is the path to the CA certificate for the external Etcd cluster + // +optional + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + + // CertFile is the path to the client certificate for the external Etcd cluster + // +optional 
+ CertFile string `json:"certFile,omitempty" yaml:"certFile,omitempty"` + + // KeyFile is the path to the client key for the external Etcd cluster + // +optional + KeyFile string `json:"keyFile,omitempty" yaml:"keyFile,omitempty"` + + // KeyPrefix is the key prefix used in the external Etcd cluster + // +optional + KeyPrefix string `json:"keyPrefix,omitempty" yaml:"keyPrefix,omitempty"` +} + +// HostCluster defines the configuration of the host cluster +type HostCluster struct { + // APIEndpoint is the API server address of the host cluster + // +optional + APIEndpoint string `json:"apiEndpoint,omitempty" yaml:"apiEndpoint,omitempty"` + + // Kubeconfig is the path to the kubeconfig file for the host cluster + // +optional + Kubeconfig string `json:"kubeconfig,omitempty" yaml:"kubeconfig,omitempty"` + + // Context is the context name in the kubeconfig for the host cluster + // +optional + Context string `json:"context,omitempty" yaml:"context,omitempty"` + + // Domain is the domain name of the host cluster + // +optional + Domain string `json:"domain,omitempty" yaml:"domain,omitempty"` + + // SecretRef refers to the credentials needed to access the host cluster + // +optional + SecretRef *LocalSecretReference `json:"secretRef,omitempty" yaml:"secretRef,omitempty"` +} + +// Images defines the configuration related to images +type Images struct { + // ImagePullPolicy is the pull policy for images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" yaml:"imagePullPolicy,omitempty"` + + // ImagePullSecrets are the secrets used for pulling images + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty" yaml:"imagePullSecrets,omitempty"` + + // KubeImageMirrorCountry is the country code for the Kubernetes image mirror + // +optional + KubeImageMirrorCountry string `json:"kubeImageMirrorCountry,omitempty" yaml:"kubeImageMirrorCountry,omitempty"` + + // KubeImageRegistry is the registry for Kubernetes images + // +optional + KubeImageRegistry string `json:"kubeImageRegistry,omitempty" yaml:"kubeImageRegistry,omitempty"` + + // KubeImageTag is the tag for Kubernetes images + // +optional + KubeImageTag string `json:"kubeImageTag,omitempty" yaml:"kubeImageTag,omitempty"` + + // PrivateRegistry is the private image registry + // +optional + PrivateRegistry *ImageRegistry `json:"privateRegistry,omitempty" yaml:"privateRegistry,omitempty"` +} + +// KarmadaComponents defines the configuration for all Karmada components +type KarmadaComponents struct { + // Etcd is the configuration for the Etcd component + // +optional + Etcd *Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"` + + // KarmadaAPIServer is the configuration for the Karmada API Server + // +optional + KarmadaAPIServer *KarmadaAPIServer `json:"karmadaAPIServer,omitempty" yaml:"karmadaAPIServer,omitempty"` + + // KarmadaAggregatedAPIServer is the configuration for the Karmada Aggregated API Server + // +optional + KarmadaAggregatedAPIServer *KarmadaAggregatedAPIServer `json:"karmadaAggregatedAPIServer,omitempty" yaml:"karmadaAggregatedAPIServer,omitempty"` + + // KubeControllerManager is the configuration for the Kube Controller Manager + // +optional + KubeControllerManager *KubeControllerManager `json:"kubeControllerManager,omitempty" yaml:"kubeControllerManager,omitempty"` + + // KarmadaControllerManager is the configuration for the Karmada Controller Manager + // +optional + KarmadaControllerManager *KarmadaControllerManager `json:"karmadaControllerManager,omitempty" 
yaml:"karmadaControllerManager,omitempty"` + + // KarmadaScheduler is the configuration for the Karmada Scheduler + // +optional + KarmadaScheduler *KarmadaScheduler `json:"karmadaScheduler,omitempty" yaml:"karmadaScheduler,omitempty"` + + // KarmadaWebhook is the configuration for the Karmada Webhook + // +optional + KarmadaWebhook *KarmadaWebhook `json:"karmadaWebhook,omitempty" yaml:"karmadaWebhook,omitempty"` +} + +// Networking defines network-related configuration +type Networking struct { + // Namespace is the Kubernetes namespace where Karmada is deployed + // +optional + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // Port is the port number for the Karmada API Server + // +optional + Port int32 `json:"port,omitempty" yaml:"port,omitempty"` +} + +// CommonSettings defines common settings for components +type CommonSettings struct { + // Image specifies the image to use for the component + Image `json:",inline" yaml:",inline"` + + // Replicas is the number of replicas for the component + // +optional + Replicas *int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // Resources defines resource requests and limits for the component + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` + + // NodeSelector defines node selection constraints + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"` + + // Tolerations define pod tolerations + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"` + + // Affinity defines pod affinity rules + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" yaml:"affinity,omitempty"` +} + +// Image defines image information +type Image struct { + // ImageRepository is the repository for the image + // +optional + ImageRepository string `json:"imageRepository,omitempty" yaml:"imageRepository,omitempty"` + + // ImageTag is the tag for the image + // +optional + ImageTag string `json:"imageTag,omitempty" yaml:"imageTag,omitempty"` +} + +// KarmadaAPIServer defines the configuration for the Karmada API Server +type KarmadaAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` + + // AdvertiseAddress is the address advertised by the API server + // +optional + AdvertiseAddress string `json:"advertiseAddress,omitempty" yaml:"advertiseAddress,omitempty"` + + // ServiceType is the type of service for the API server (e.g., ClusterIP, NodePort) + // +optional + ServiceType corev1.ServiceType `json:"serviceType,omitempty" yaml:"serviceType,omitempty"` + + // ServiceAnnotations are annotations added to the API server service + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty" yaml:"serviceAnnotations,omitempty"` +} + +// KarmadaAggregatedAPIServer defines the configuration for the Karmada Aggregated API Server +type KarmadaAggregatedAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KubeControllerManager defines the configuration for the Kube Controller Manager +type KubeControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaControllerManager defines the configuration for the Karmada Controller Manager +type KarmadaControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaScheduler defines the configuration for the Karmada Scheduler +type KarmadaScheduler struct { + CommonSettings `json:",inline" 
yaml:",inline"`
+}
+
+// KarmadaWebhook defines the configuration for the Karmada Webhook
+type KarmadaWebhook struct {
+    CommonSettings `json:",inline" yaml:",inline"`
+}
+
+// LocalSecretReference is a reference to a secret within the same namespace
+type LocalSecretReference struct {
+    // Name is the name of the referenced secret
+    Name string `json:"name,omitempty" yaml:"name,omitempty"`
+}
+
+// ImageRegistry represents an image registry
+type ImageRegistry struct {
+    // Registry is the hostname of the image registry
+    // +required
+    Registry string `json:"registry" yaml:"registry"`
+}
+```
+
+Once the configuration file has been read, its contents will be placed into the `KarmadaInitConfig` structure, and the fields will then be filled into the `CommandInitOption` structure used to control the `karmadactl init` process. This way, the deployment of the Karmada control plane can proceed according to the original logic.
+
+The configuration file can be modeled on the config structure used by `kubeadm init`; a YAML example is provided below. To provide users with a starting point, we can prepare a default configuration file example and expose it through a sub-command of `karmadactl config`. When the feature design is complete and the official documentation is updated, an example can also be demonstrated in the official documentation.
+
+Example configuration file:
+
+```yaml
+apiVersion: config.karmada.io/v1alpha1
+kind: KarmadaInitConfig
+metadata:
+  name: karmada-init
+spec:
+  certificates:
+    caCertFile: "/etc/karmada/pki/ca.crt"
+    caKeyFile: "/etc/karmada/pki/ca.key"
+    externalDNS:
+      - "localhost"
+      - "example.com"
+    externalIP:
+      - "192.168.1.2"
+      - "172.16.1.2"
+    validityPeriod: "8760h0m0s"
+  etcd:
+    local:
+      image:
+        imageRepository: "karmada/etcd"
+        imageTag: "latest"
+      dataPath: "/var/lib/karmada-etcd"
+      initImage:
+        imageRepository: "alpine"
+        imageTag: "3.19.1"
+      nodeSelectorLabels:
+        karmada.io/etcd: "true"
+      pvcSize: "5Gi"
+      replicas: 3
+      storageMode: "PVC"
+  hostCluster:
+    apiEndpoint: "https://kubernetes.example.com"
+    kubeconfig: "/root/.kube/config"
+    context: "karmada-host"
+    domain: "cluster.local"
+  images:
+    imagePullPolicy: "IfNotPresent"
+    imagePullSecrets:
+      - "PullSecret1"
+      - "PullSecret2"
+    kubeImageMirrorCountry: "cn"
+    kubeImageRegistry: "registry.cn-hangzhou.aliyuncs.com/google_containers"
+    kubeImageTag: "v1.29.6"
+    privateRegistry:
+      registry: "my.private.registry"
+  components:
+    karmadaAPIServer:
+      image:
+        imageRepository: "karmada/kube-apiserver"
+        imageTag: "v1.29.6"
+      replicas: 1
+      advertiseAddress: "192.168.1.100"
+      serviceType: "NodePort"
+    karmadaAggregatedAPIServer:
+      image:
+        imageRepository: "karmada/karmada-aggregated-apiserver"
+        imageTag: "v0.0.0-master"
+      replicas: 1
+    kubeControllerManager:
+      image:
+        imageRepository: "karmada/kube-controller-manager"
+        imageTag: "v1.29.6"
+      replicas: 1
+    karmadaControllerManager:
+      image:
+        imageRepository: "karmada/karmada-controller-manager"
+        imageTag: "v0.0.0-master"
+      replicas: 1
+    karmadaScheduler:
+      image:
+        imageRepository: "karmada/karmada-scheduler"
+        imageTag: "v0.0.0-master"
+      replicas: 1
+    karmadaWebhook:
+      image:
+        imageRepository: "karmada/karmada-webhook"
+        imageTag: "v0.0.0-master"
+      replicas: 1
+  networking:
+    namespace: "karmada-system"
+    port: 32443
+  storage:
+    storageClassName: "fast-storage"
+  karmadaDataPath: "/etc/karmada"
+  karmadaPkiPath: "/etc/karmada/pki"
+  waitComponentReadyTimeout: 120
+```
+
+## Alternatives
+
+
+
\ No newline at end
of file
diff --git a/docs/proposals/migration-rollback-protection/README.md b/docs/proposals/migration-rollback-protection/README.md
new file mode 100644
index 000000000000..3e0f4fe01c80
--- /dev/null
+++ b/docs/proposals/migration-rollback-protection/README.md
@@ -0,0 +1,324 @@
+---
+title: Migration Rollback Protection
+authors:
+- "@CharlesQQ"
+reviewers:
+- "@RainbowMango"
+- "@XiShanYongYe-Chang"
+- "@chaosi-zju"
+- "@whitewindmills"
+- "@grosser"
+approvers:
+- "@RainbowMango"
+
+creation-date: 2024-07-01
+
+---
+
+# Migration Rollback Protection
+
+## Summary
+
+Provide a deletion strategy for federated resources, allowing users to choose whether to synchronously delete the workloads in member clusters when deleting workloads at the federation level.
+
+This setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters.
+
+## Motivation
+
+The current behavior of the Karmada system is that when a user deletes resources from the Karmada control plane, the distributed resources in the member clusters are also deleted synchronously. However, in certain scenarios, such as workload migration scenarios, users may wish to retain the workloads in the member clusters.
+
+### Goals
+
+- Provide the capability to retain resources in member clusters when deleting control plane resources, while at the same time, clean up labels/annotations and other information attached to member cluster resources by the Karmada system.
+
+### Non-Goals
+
+- Define different resource deletion strategies for different member clusters.
+- Provide the capability to retain resources in member clusters for Karmada federated resources, such as cronfederatedhpa, federatedhpa, federatedresourcequota, etc.
+- Other deletion strategies, such as retaining work objects in the Karmada control plane.
+- Cascading deletion control of resources in member clusters.
+
+## Proposal
+
+### User Stories (Optional)
+
+#### Story 1
+
+As an administrator, I want to be able to rely on the rollback mechanism provided by Karmada during the process of migrating workloads to Karmada: if any unexpected situation arises, such as the cloud platform being unable to publish the application or a Pod encountering unexpected issues, I can immediately revert to the state before the migration in order to quickly stop the loss.
+
+### Notes/Constraints/Caveats (Optional)
+
+- For resources that are not distributed through a PropagationPolicy, such as namespaces, it is not possible to specify a deletion policy, unless the controller for automatic resource propagation is disabled and users are required to propagate resources through PP (PropagationPolicy) / CPP (ClusterPropagationPolicy).
+- In the one-policy-to-many-resources scenario, the deletion policy cannot be applied per individual resource.
+
+### Risks and Mitigations
+
+## Design Details
+
+### Extend the fields of PropagationPolicy/ClusterPropagationPolicy
+
+By extending the `PropagationPolicy/ClusterPropagationPolicy` API, a new bool field `PreserveResourcesOnDeletion` is introduced. The field will be transparently propagated to `ResourceBinding/ClusterResourceBinding` and the Work object. Finally, the execution controller determines the deletion strategy based on the value of this field on the Work object.
+
+#### API changes
+
+PropagationPolicy/ClusterPropagationPolicy
+```go
+type PropagationSpec struct {
+    ...
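+    // NOTE: the ellipsis above stands for the existing PropagationSpec fields, which this proposal leaves unchanged.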
+
+    // PreserveResourcesOnDeletion controls whether resources should be preserved on the
+    // member clusters when the resource template is deleted.
+    // If set to true, resources will be preserved on the member clusters.
+    // Default is false, which means resources will be deleted along with the resource template.
+    //
+    // This setting is particularly useful during workload migration scenarios to ensure
+    // that rollback can occur quickly without affecting the workloads running on the
+    // member clusters.
+    //
+    // Additionally, this setting applies uniformly across all member clusters and will not
+    // selectively control preservation on only some clusters.
+    //
+    // Note: This setting does not apply to the deletion of the policy itself.
+    // When the policy is deleted, the resource templates and their corresponding
+    // propagated resources in member clusters will remain unchanged unless explicitly deleted.
+    //
+    // +optional
+    PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
+}
+```
+
+ResourceBinding/ClusterResourceBinding
+```go
+type ResourceBindingSpec struct {
+    ...
+
+    // PreserveResourcesOnDeletion controls whether resources should be preserved on the
+    // member clusters when the binding object is deleted.
+    // If set to true, resources will be preserved on the member clusters.
+    // Default is false, which means resources will be deleted along with the binding object.
+    // This setting applies to all Work objects created under this binding object.
+    // +optional
+    PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
+}
+```
+
+Work
+```go
+// WorkSpec defines the desired state of Work.
+type WorkSpec struct {
+    ...
+
+    // PreserveResourcesOnDeletion controls whether resources should be preserved on the
+    // member cluster when the Work object is deleted.
+    // If set to true, resources will be preserved on the member cluster.
+    // Default is false, which means resources will be deleted along with the Work object.
+    // +optional
+    PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"`
+}
+```
+
+#### Controller logic changes
+
+The `detector` needs to pass the `PreserveResourcesOnDeletion` from PropagationPolicy/ClusterPropagationPolicy to ResourceBinding/ClusterResourceBinding.
+
+The `binding-controller` needs to pass the `PreserveResourcesOnDeletion` from ResourceBinding to Work.
+
+The `cluster-resource-binding-controller` needs to pass the `PreserveResourcesOnDeletion` from ClusterResourceBinding to Work.
+
+The `execution-controller` needs to perform resource deletion based on the `PreserveResourcesOnDeletion` field in Work.
+
+#### User usage example
+
+Set the cascade deletion policy to orphan:
+
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: PropagationPolicy
+metadata:
+  name: nginx-propagation
+spec:
+  resourceSelectors:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: nginx
+  preserveResourcesOnDeletion: true
+```
+
+#### Q&A:
+
+1. Why is the deletion policy of dependent resources not forcibly bound to that of the main resource?
+
+Since dependent resources may be shared by multiple resource templates, it is difficult to decide which deletion strategy should be used for the dependent resources in this case; the policy is therefore not forcibly bound to the main resource and is left to the user to decide, which provides greater flexibility and scalability.
+
+2. Is it enough to clear only the `karmada.io/managed` label on workloads in the member clusters?
+
+Logically, after the label `karmada.io/managed` is cleared, the relationship with Karmada is severed.
+
+### Test Plan
+
+TODO
+
+## Alternatives
+
+### Extended by Annotation
+
+#### API changes
+
+A new annotation is added for users to include on resource templates in the Karmada control plane, with the key: `resourcetemplate.karmada.io/cascadedeletion`. To increase extensibility, the value is a string enumeration; the currently supported values include:
+- orphan: Retain resources in member clusters and clean up labels/annotations and other information attached to member cluster resources by the Karmada system.
+
+When users do not specify this annotation, the system keeps its current behavior and synchronously deletes resources in member clusters.
+
+#### Controller logic changes
+
+The `resourcetemplate.karmada.io/cascadedeletion` annotation added by users to the resource template will be propagated to `work.spec.workload.manifests`. When the resource template is deleted, the `execution-controller` will execute the logic for deleting the work object. It can parse the value of the `resourcetemplate.karmada.io/cascadedeletion` annotation from `work.spec.workload.manifests` and perform the following judgment logic:
+- If the target annotation does not exist, synchronously delete the resources in the member clusters.
+- If the target annotation value is `orphan`, retain the resources in the member clusters and clean up the labels/annotations and other information attached to the member cluster resources by the Karmada system.
+
+![resource-delete-policy](./statics/resource-delete-policy.png)
+
+#### User usage example
+
+Set the cascade deletion policy to orphan:
+
+```yaml
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  annotations:
+    propagationpolicy.karmada.io/name: foo
+    propagationpolicy.karmada.io/namespace: default
+    resourcetemplate.karmada.io/cascadedeletion: orphan
+  ...
+```
+
+A variant of this approach adds a `CascadeDeletion` field to the Work API, so there is no need to parse `work.spec.workload.manifests`.
+
+Work
+```go
+// WorkSpec defines the desired state of Work.
+type WorkSpec struct {
+    ...
+
+    // CascadeDeletion declares the cascade deletion strategy. The default value is null, which is equivalent to background.
+    // +optional
+    CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"`
+}
+```
+
+The `binding-controller` needs to set the `CascadeDeletion` field in the Work object according to the resource annotation.
+
+The `cluster-resource-binding-controller` needs to set the `CascadeDeletion` field in the Work object according to the resource annotation.
+
+The `execution-controller` needs to perform resource deletion based on the `CascadeDeletion` field in Work.
+
+> Note: For namespace resources, the `namespace-sync-controller` in the Karmada system automatically propagates each new namespace created by users to member clusters, and the system achieves this functionality by directly generating work objects. For the scheme that adds new API fields to the Work, the `namespace-sync-controller` needs to be responsible for processing that field.
+
+#### Advantages & Disadvantages
+
+Disadvantages:
+- Using annotations as an API is somewhat informal.
+
+### Extended by adding a new CRD
+
+A new CRD resource is added, through which users define a CR (Custom Resource) of this CRD to describe the resource deletion strategy for the target resource.
+
+#### API changes
+
+```go
+type CascadeDeletionPolicy struct {
+    metav1.TypeMeta   `json:",inline"`
+    metav1.ObjectMeta `json:"metadata,omitempty"`
+
+    // Spec represents the desired cascade deletion behavior.
+    Spec CascadeDeletionSpec `json:"spec"`
+
+    // Status represents the status of cascade deletion.
+    // +optional
+    Status CascadeDeletionStatus `json:"status,omitempty"`
+}
+
+type CascadeDeletionSpec struct {
+    // CascadeDeletion declares the cascade deletion strategy. The default value is null, which is equivalent to background.
+    // +optional
+    CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"`
+    // ResourceSelectors used to select resources.
+    // Nil or empty selector is not allowed and doesn't mean match all kinds
+    // of resources for security concerns that sensitive resources (like Secret)
+    // might be accidentally propagated.
+    // +required
+    // +kubebuilder:validation:MinItems=1
+    ResourceSelectors []ResourceSelector `json:"resourceSelectors"`
+}
+
+// ResourceSelector the resources will be selected.
+type ResourceSelector struct {
+    // APIVersion represents the API version of the target resources.
+    // +required
+    APIVersion string `json:"apiVersion"`
+
+    // Kind represents the Kind of the target resources.
+    // +required
+    Kind string `json:"kind"`
+
+    // Namespace of the target resource.
+    // Default is empty, which means inherit from the parent object scope.
+    // +optional
+    Namespace string `json:"namespace,omitempty"`
+
+    // Name of the target resource.
+    // Default is empty, which means selecting all resources.
+    // +optional
+    Name string `json:"name,omitempty"`
+
+    // A label query over a set of resources.
+    // If name is not empty, labelSelector will be ignored.
+    // +optional
+    LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"`
+}
+
+type CascadeDeletionStatus struct {
+    ...
+}
+```
+
+Work
+```go
+// WorkSpec defines the desired state of Work.
+type WorkSpec struct {
+    // CascadeDeletion declares the cascade deletion strategy. The default value is null, which is equivalent to background.
+    // +optional
+    CascadeDeletion *CascadeDeletionPolicy `json:"cascadeDeletion,omitempty"`
+
+    ...
+}
+```
+
+#### Controller logic changes
+
+The `binding-controller`/`cluster-resource-binding-controller` checks for the existence of a `CascadeDeletionPolicy` associated with the target resource when creating or updating the Work object. If found, the deletion policy is synchronized into the Work object.
+
+The `execution-controller` carries out resource deletion based on the `CascadeDeletion` field in the Work object.
+
+#### User usage example
+
+Set the cascade deletion policy to orphan:
+
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: CascadeDeletionPolicy
+metadata:
+  name: foo
+spec:
+  cascadeDeletion: orphan
+  resourceSelectors:
+    - apiVersion: apps/v1
+      kind: Deployment
+      name: foo
+      namespace: default
+```
+
+#### Advantages & Disadvantages
+
+Disadvantages:
+- It increases the learning cost for users and results in an increased number of resources in the Karmada control plane.
\ No newline at end of file
diff --git a/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png b/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png
new file mode 100644
index 000000000000..b9e1a5fd8f78
Binary files /dev/null and b/docs/proposals/migration-rollback-protection/statics/resource-delete-policy.png differ
diff --git a/docs/proposals/scheduling/policy-preemption/README.md b/docs/proposals/scheduling/policy-preemption/README.md
index 978d541f1ed3..026a79e83f8e 100644
--- a/docs/proposals/scheduling/policy-preemption/README.md
+++ b/docs/proposals/scheduling/policy-preemption/README.md
@@ -49,7 +49,7 @@ Even if workloads have been propagated by a policy, they can be preempted by a h
 Cluster administrators usually cannot foresee future expansion scenarios when configuring policies.
 They will usually start with a broad policy to set the base strategy.
 When an application requires special configuration,
-the administrator wants to provide a persionalized policy to take over the application.
+the administrator wants to provide a personalized policy to take over the application.
 At this time, it hopes that the high-priority policy can preempt the low-priority policy.
 
 ### Goals
@@ -257,7 +257,7 @@ metadata:
   namespace: default
 ```
 
-Assume that there is a high-priority policy which allows preepmtion:
+Assume that there is a high-priority policy which allows preemption:
 
 ```yaml
 apiVersion: policy.karmada.io/v1alpha1
diff --git a/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md b/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
index 160d1ce80662..2cecb5df454e 100644
--- a/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
+++ b/docs/proposals/scheduling/workload-rebalancer/workload-rebalancer.md
@@ -87,7 +87,7 @@ sufficient resource to accommodate all replicas, so that the application better
 
 #### Story 4
 
-In disaster-recovery scenario, replicas migrated from primary cluster to backup cluster when primary cluster failue.
+In the disaster-recovery scenario, replicas are migrated from the primary cluster to the backup cluster when the primary cluster fails.
 
 As a cluster administrator, I hope that replicas can migrate back when cluster restored, so that:
 
@@ -402,7 +402,7 @@ status:
 > 1. the `observedWorkloads` is sorted in increasing dict order of the combined string of `apiVersion/kind/namespace/name` .
 > 2. if workload referenced binding not found, it will be marked as `failed` without retry.
 > 3. if workload rebalanced failed due to occasional network error, the controller will retry, and its `result` and `reason`
-> field will left empty until it succees.
+> field will be left empty until it succeeds.
 
 ### How to update this resource
 
diff --git a/docs/proposals/service-discovery/README.md b/docs/proposals/service-discovery/README.md
index 8a2d6a182345..1cb48158af11 100644
--- a/docs/proposals/service-discovery/README.md
+++ b/docs/proposals/service-discovery/README.md
@@ -162,7 +162,7 @@ With this API, we will:
 
 * Use `ServiceProvisionClusters` to specify the member clusters which will provision the service backend, if leave it empty, we will collect the backend endpoints from all clusters and sync them to the `ServiceConsumptionClusters`.
 * Use `ServiceConsumptionClusters` to specify the clusters where the service will be exposed. If leave it empty, the service will be exposed to all clusters.
-For example, if we want access `foo`` service which are localted in member2 from member3 , we can use the following yaml:
+For example, if we want to access the `foo` service located in member2 from member3, we can use the following yaml:
 
 ```yaml
 apiVersion: v1
 kind: Service
@@ -223,10 +223,10 @@ The process of synchronizing `EndpointSlice` from `ServiceProvisionClusters` to
 
 1. `endpointsliceDispatch` controller will list&watch `MultiClusterService`.
 1. `endpointsliceDispatch` controller will list&watch `EndpointSlice` from `MultiClusterService`'s `spec.serviceProvisionClusters`.
-1. `endpointsliceDispatch` controller will creat the corresponding Work for each `EndpointSlice` in the cluster namespace of `MultiClusterService`'s `spec.serviceConsumptionClusters`.
+1. `endpointsliceDispatch` controller will create the corresponding Work for each `EndpointSlice` in the cluster namespace of `MultiClusterService`'s `spec.serviceConsumptionClusters`.
   When creating the Work, in order to facilitate problem investigation, we should add following annotation to record the original `EndpointSlice` information:
   * `endpointslice.karmada.io/work-provision-cluster`: the cluster name of the original `EndpointSlice`.
-  Also, we should add the following annotation to the syned `EndpointSlice` record the original information:
+  Also, we should add the following annotation to the synced `EndpointSlice` to record the original information:
   * `endpointslice.karmada.io/endpointslice-generation`: the resource generation of the `EndpointSlice`, it could be used to check whether the `EndpointSlice` is the newest version.
  * `endpointslice.karmada.io/provision-cluster`: the cluster location of the original `EndpointSlice`.
 1. Karmada will sync the `EndpointSlice`'s work to the member clusters.
@@ -326,7 +326,7 @@ For better monitoring, we should have following metrics:
 * For `multiclusterservice` controller, List&watch cluster creation/deletion, reconcile the work in corresponding cluster execution namespace. (10)
 * For `endpointsliceCollect` controller, List&watch mcs, collect the corresponding EndpointSlice from `serviceProvisionClusters`, and `endpointsliceDispatch` controller should sync the corresponding Work. (5d)
 * For `endpointsliceCollect` controller, List&watch cluster creation/deletion, reconcile the EndpointSlice's work in corresponding cluster execution namespace. (10d)
-* If cluster gets unhealth, mcs-eps-controller should delete the EndpointSlice from all the cluster execution namespace. (5d)
+* If a cluster becomes unhealthy, mcs-eps-controller should delete the EndpointSlice from all the cluster execution namespaces. (5d)
 
 ### Test Plan
 
diff --git a/docs/proposals/structured-configuration/README.md b/docs/proposals/structured-configuration/README.md
new file mode 100644
index 000000000000..969c016c1f7f
--- /dev/null
+++ b/docs/proposals/structured-configuration/README.md
@@ -0,0 +1,543 @@
+---
+title: Structured configuration overrider
+authors:
+- "@Patrick0308"
+- "@sophiefeifeifeiya"
+reviewers:
+- "@chaunceyjiang"
+approvers:
+- "@chaunceyjiang"
+creation-date: 2024-08-12
+---
+
+# Structured configuration overrider
+## Summary
+The proposal introduces a new feature that allows users to partially override values inside JSON and YAML fields. This is achieved using JSON patch operations. This design enables users to override values within JSON/YAML fields partially, rather than replacing whole JSON/YAML fields as with `PlaintextOverrider`.
Currently, `PlaintextOverrider` applies JSON patch operations to whole fields, rather than to specific values within fields, making it unsuitable for cases where users need to override individual values within those fields.
+
+## Motivation
+### Goals
++ Allow users to override specific values inside JSON and YAML in resources (e.g., ConfigMap).
++ Support JSON patch operations ("add", "remove", "replace") for both JSON and YAML.
+### Non-Goals
++ Support all data formats, like XML.
++ Support every operation of YAML and JSON.
+## Proposal
+### User Stories (Optional)
+#### Story 1
+As an administrator and developer, I want to update specific values within JSON/YAML in resources without replacing the entire configuration, ensuring that my changes are minimal and targeted.
+### Notes/Constraints/Caveats (Optional)
+Illustrated in the YAML Implementation section below.
+### Risks and Mitigations
+## Design Details
+### API Change
+```go
+type Overriders struct {
+    ...
+    // FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource.
+    // This allows changing a single field within the resource with multiple operations.
+    // It is designed to handle structured field values such as those found in ConfigMaps or Secrets.
+    // The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.
+    // +optional
+    FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"`
+}
+
+type FieldOverrider struct {
+    // FieldPath specifies the initial location in the instance document where the operation should take place.
+    // The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml"
+    // specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml".
+    // +required
+    FieldPath string `json:"fieldPath"`
+
+    // JSON represents the operations performed on the JSON document specified by the FieldPath.
+    // +optional
+    JSON []JSONPatchOperation `json:"json,omitempty"`
+
+    // YAML represents the operations performed on the YAML document specified by the FieldPath.
+    // +optional
+    YAML []YAMLPatchOperation `json:"yaml,omitempty"`
+}
+
+// JSONPatchOperation represents a single field modification operation for JSON format.
+type JSONPatchOperation struct {
+    // SubPath specifies the relative location within the initial FieldPath where the operation should take place.
+    // The path uses RFC 6901 for navigating into nested structures.
+    // +required
+    SubPath string `json:"subPath"`
+
+    // Operator indicates the operation on target field.
+    // Available operators are: "add", "remove", and "replace".
+    // +kubebuilder:validation:Enum=add;remove;replace
+    // +required
+    Operator OverriderOperator `json:"operator"`
+
+    // Value is the new value to set for the specified field if the operation is "add" or "replace".
+    // For "remove" operation, this field is ignored.
+    // +optional
+    Value apiextensionsv1.JSON `json:"value,omitempty"`
+}
+
+// YAMLPatchOperation represents a single field modification operation for YAML format.
+type YAMLPatchOperation struct {
+    // SubPath specifies the relative location within the initial FieldPath where the operation should take place.
+    // The path uses RFC 6901 for navigating into nested structures.
+    // +required
+    SubPath string `json:"subPath"`
+
+    // Operator indicates the operation on target field.
+    // Available operators are: "add", "remove", and "replace".
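+    // These operators mirror the corresponding RFC 6902 JSON Patch operations.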
+    // +kubebuilder:validation:Enum=add;remove;replace
+    // +required
+    Operator OverriderOperator `json:"operator"`
+
+    // Value is the new value to set for the specified field if the operation is "add" or "replace".
+    // For "remove" operation, this field is ignored.
+    // +optional
+    Value apiextensionsv1.JSON `json:"value,omitempty"`
+}
+
+// OverriderOperator is the set of operators that can be used in an overrider.
+type OverriderOperator string
+
+// These are valid overrider operators.
+const (
+    OverriderOpAdd     OverriderOperator = "add"
+    OverriderOpRemove  OverriderOperator = "remove"
+    OverriderOpReplace OverriderOperator = "replace"
+)
+```
+
+### User usage example
+For example, consider a ConfigMap with the following data in a member cluster:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-config
+data:
+  db-config.yaml: |
+    database:
+      host: localhost
+      port: 3306
+```
+The following is an OverridePolicy which uses FieldOverrider:
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: OverridePolicy
+metadata:
+  name: example-configmap-override
+spec:
+  resourceSelectors:
+    - apiVersion: v1
+      kind: ConfigMap
+      name: example-config
+  overrideRules:
+    - overriders:
+        fieldOverrider:
+          - fieldPath: /data/db-config.yaml
+            yaml:
+              - subPath: /database/host
+                operator: replace
+                value: "remote-db.example.com"
+              - subPath: /database/port
+                operator: replace
+                value: "3307"
+```
+After we apply this policy, the db-config.yaml is modified as follows:
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-config
+data:
+  db-config.yaml: |
+    database:
+      host: remote-db.example.com
+      port: 3307
+```
+
+### YAML Implementation
+
+We choose Plan 1:
+(1) Plan 1 is easy to implement by converting YAML directly to JSON, while Plan 2 has to encapsulate ytt's grammar as if it were JSON patch.
+(2) Plan 1 and Plan 2 both have the same issues, illustrated in the following.
+
+#### Plan 1: directly convert to and back from JSON (chosen)
+If YAML is directly converted to JSON and then converted back using `sigs.k8s.io/yaml`, some data type information might be lost.
+1. Dates and Times
+   ```yaml
+   dob: 1979-05-27T07:32:00Z
+   date_of_birth: 1979-05-27
+
+   dob: time.Time
+   date_of_birth: time.Time
+
+   # after transformation
+
+   dob: string
+   date_of_birth: string
+   ```
+
+2.
Supports anchors (&) and aliases (*) to reference and reuse values
+   ```yaml
+   default: &default
+     name: Alice
+     age: 30
+   employee1:
+     <<: *default
+     role: Developer
+   employee2:
+     <<: *default
+     role: Designer
+
+   # after transformation
+
+   default:
+     age: 30
+     name: Alice
+   employee1:
+     age: 30
+     name: Alice
+     role: Developer
+   employee2:
+     age: 30
+     name: Alice
+     role: Designer
+   ```
+   **Can be applied if:**
+   (1) we do not consider the situations described above, or
+   (2) we write special cases to deal with the different types **(large maintenance cost)**
+
+#### Plan 2: ytt (Aborted)
+Use the third-party library **ytt**, which supports overlays (https://carvel.dev/ytt/docs/v0.50.x/ytt-overlays/), to implement operations similar to JSON patch operations. The implementation would rely on ytt's own specific syntax.
+Documentation: https://carvel.dev/ytt/docs/v0.50.x/ytt-overlays/
+Integration methods: https://github.com/carvel-dev/ytt/blob/develop/examples/integrating-with-ytt/apis.md#as-a-go-module
+After testing, it has the same problems as **Plan 1**.
+1. Dates and Times
+```yaml
+dob: 1979-05-27T07:32:00Z
+date_of_birth: 1979-05-27
+
+# after transformation
+
+dob: "1979-05-27T07:32:00Z"
+date_of_birth: "1979-05-27"
+```
+
+2. Supports anchors (&) and aliases (*) to reference and reuse values
+```yaml
+default: &default
+  name: Alice
+  age: 30
+employee1:
+  <<: *default
+  role: Developer
+employee2:
+  <<: *default
+  role: Designer
+
+# after transformation
+
+default:
+  name: Alice
+  age: 30
+employee1:
+  name: Alice
+  age: 30
+  role: Developer
+employee2:
+  name: Alice
+  age: 30
+  role: Designer
+```
+**Can be applied if:**
+(1) we do not consider the situations described above, or
+(2) we can maintain functionality despite incompatibilities arising from encapsulating ytt as JSON operations later on.
+
+### Test Plan
+#### UT
+- Add unit tests to cover the new functions.
+#### E2E
++ Write the test plan in `coverage_docs/overridepolicy_test.md`: deployment `FieldOverrider` testing;
++ Use ginkgo to complete the test code in `overridepolicy_test.go`.
+
+## Alternatives
+There are three API designs to achieve this:
+### (1) Each data format has a different struct
+```go
+type Overriders struct {
+    ...
+    // JSONPlaintextOverrider represents the rules dedicated to handling json object overrides
+    // +optional
+    JSONPlaintextOverrider []JSONPlaintextOverrider `json:"jsonPlaintextOverrider,omitempty"`
+    // YAMLPlaintextOverrider represents the rules dedicated to handling yaml object overrides
+    // +optional
+    YAMLPlaintextOverrider []YAMLPlaintextOverrider `json:"yamlPlaintextOverrider,omitempty"`
+}
+
+type JSONPlaintextOverrider struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // Patch represents JSON patch rules defined with plaintext overriders.
+    Patch []Patch `json:"patch"`
+    // MergeValue represents the object value to be merged into the object.
+    MergeValue apiextensionsv1.JSON `json:"mergeValue"`
+    // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object.
+    MergeRawValue string `json:"mergeRawValue"`
+}
+
+type YAMLPlaintextOverrider struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // Patch represents JSON patch rules defined with plaintext overriders.
+    Patch []Patch `json:"patch"`
+    // MergeValue represents the object value to be merged into the object.
+    MergeValue apiextensionsv1.JSON `json:"mergeValue"`
+    // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object.
+    MergeRawValue string `json:"mergeRawValue"`
+}
+
+type Patch struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // From indicates the path of original field when operator is move and copy
+    From string `json:"from"`
+    // Operator indicates the operation on target field.
+    // Available operators are: add, remove, replace, move, test and copy.
+    // +kubebuilder:validation:Enum=add;remove;replace;move;test;copy
+    Operator PatchOperator `json:"operator"`
+    // Value to be applied to target field.
+    // Must be empty when operator is Remove.
+    // +optional
+    Value apiextensionsv1.JSON `json:"value,omitempty"`
+}
+
+// PatchOperator is the set of operators that can be used in an overrider.
+type PatchOperator string
+
+// These are valid patch operators.
+const (
+    PatchOpAdd     PatchOperator = "add"
+    PatchOpRemove  PatchOperator = "remove"
+    PatchOpReplace PatchOperator = "replace"
+    PatchOpMove    PatchOperator = "move"
+    PatchOpTest    PatchOperator = "test"
+    PatchOpCopy    PatchOperator = "copy"
+)
+```
+
+### (2) Each data format has a different name with the same struct
+
+```go
+type Overriders struct {
+    ...
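+    // NOTE: the ellipsis stands for the existing Overriders fields, which remain unchanged in this alternative.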
+    // JSONPlaintextOverrider represents the rules dedicated to handling json object overrides
+    // +optional
+    JSONPlaintextOverrider []PlaintextObjectOverrider `json:"jsonPlaintextOverrider,omitempty"`
+    // YAMLPlaintextOverrider represents the rules dedicated to handling yaml object overrides
+    // +optional
+    YAMLPlaintextOverrider []PlaintextObjectOverrider `json:"yamlPlaintextOverrider,omitempty"`
+    // TOMLPlaintextOverrider represents the rules dedicated to handling toml object overrides
+    // +optional
+    TOMLPlaintextOverrider []PlaintextObjectOverrider `json:"tomlPlaintextOverrider,omitempty"`
+    // XMLPlaintextOverrider represents the rules dedicated to handling xml object overrides
+    // +optional
+    XMLPlaintextOverrider []PlaintextObjectOverrider `json:"xmlPlaintextOverrider,omitempty"`
+}
+
+type PlaintextObjectOverrider struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // JSONPatch represents JSON patch rules defined with plaintext overriders.
+    JSONPatch []JSONPatch `json:"jsonPatch"`
+    // MergeValue represents the object value to be merged into the object.
+    MergeValue apiextensionsv1.JSON `json:"mergeValue"`
+    // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object.
+    MergeRawValue string `json:"mergeRawValue"`
+}
+
+type JSONPatch struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // From indicates the path of original field when operator is move and copy
+    From string `json:"from"`
+    // Operator indicates the operation on target field.
+    // Available operators are: add, remove, replace, move, test and copy.
+    // +kubebuilder:validation:Enum=add;remove;replace;move;test;copy
+    Operator JSONPatchOperator `json:"operator"`
+    // Value to be applied to target field.
+    // Must be empty when operator is Remove.
+    // +optional
+    Value apiextensionsv1.JSON `json:"value,omitempty"`
+}
+
+// JSONPatchOperator is the set of operators that can be used in an overrider.
+type JSONPatchOperator string
+
+// These are valid patch operators.
+const (
+    PatchOpAdd     JSONPatchOperator = "add"
+    PatchOpRemove  JSONPatchOperator = "remove"
+    PatchOpReplace JSONPatchOperator = "replace"
+    PatchOpMove    JSONPatchOperator = "move"
+    PatchOpTest    JSONPatchOperator = "test"
+    PatchOpCopy    JSONPatchOperator = "copy"
+)
+```
+
+### (3) Enumeration
+
+```go
+type Overriders struct {
+    ...
+    // PlaintextObjectOverrider represents the rules dedicated to handling object overrides for the supported data formats
+    // +optional
+    PlaintextObjectOverrider []PlaintextObjectOverrider `json:"plaintextObjectOverrider,omitempty"`
+}
+
+type PlaintextObjectOverrider struct {
+    // Path indicates the path of target field
+    Path string `json:"path"`
+    // DataFormat indicates the data format to be modified
+    DataFormat DataFormat `json:"dataFormat"`
+    // JSONPatch represents JSON patch rules defined with plaintext overriders.
+    JSONPatch []JSONPatch `json:"jsonPatch"`
+    // MergeValue represents the object value to be merged into the object.
+    MergeValue apiextensionsv1.JSON `json:"mergeValue"`
+    // MergeRawValue represents the raw, original format data (e.g., YAML, JSON) to be merged into the object.
+    MergeRawValue string `json:"mergeRawValue"`
+}
+
+type DataFormat string
+
+const (
+    DataFormatYAML DataFormat = "yaml"
+    DataFormatJSON DataFormat = "json"
+    DataFormatTOML DataFormat = "toml"
+)
+```
+
+### Analysis of 3 Implementations
+
+(1) Each data format has a different struct (**easiest to extend**)
+json -> json * op -> json -> JSONPlaintextOverrider []JSONPlaintextOverrider
+yaml -> yaml * op -> yaml -> YAMLPlaintextOverrider []YAMLPlaintextOverrider
+xml -> xml * op -> xml -> XMLPlaintextOverrider []XMLPlaintextOverrider
+...
+This one is designed for **native operations/JSON operations** for each data format.
+For example, json has 5 json operations, yaml has 3 yaml operations, and xml has 4 xml operations, ...
+That would be 5+3+4+... operations in total, which is a huge number.
+
+(2) Each data format has a different name with the same struct
+json -> json * op -> json -> JSONPlaintextOverrider []PlaintextObjectOverrider
+yaml -> yaml * op -> yaml -> YAMLPlaintextOverrider []PlaintextObjectOverrider
+xml -> xml * op -> xml -> XMLPlaintextOverrider []PlaintextObjectOverrider
+...
+This one is designed for **JSON operations** for all data formats.
+(3) Enumeration
+json -> json * op -> json -> enum
+yaml -> json * op -> yaml -> enum
+xml -> json * op -> xml -> enum
+...
+This one is designed for **JSON operations** for all data formats.
+
+### User usage example for (1)
+For example, consider a ConfigMap with the following data in a member cluster:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-configmap
+  namespace: default
+data:
+  config.json: |
+    {
+      "keyA": "valueA",
+      "keyB": "valueB",
+      "keyC": "valueC",
+      "keyD": "valueD",
+      "keyE": "valueE",
+      "keyF": "valueF"
+    }
+```
+
+The following is an OverridePolicy which uses JSONPlaintextOverrider:
+
+```yaml
+apiVersion: policy.karmada.io/v1alpha1
+kind: OverridePolicy
+metadata:
+  name: example-override
+  namespace: default
+spec:
+  targetCluster:
+    clusterNames:
+      - member1
+  resourceSelectors:
+    - apiVersion: v1
+      kind: ConfigMap
+      name: example-configmap
+      namespace: default
+  overrideRules:
+    - overriders:
+        jsonPlaintextOverrider:
+          - path: /data/config.json
+            patch:
+              - path: /keyA
+                operator: test
+                value: "valueA"
+              - path: /keyB
+                operator: remove
+              - path: /keyC
+                operator: replace
+                value: "newly added value"
+              - from: /keyD
+                path: /keyF
+                operator: move
+              - from: /keyE
+                path: /keyG
+                operator: copy
+            mergeValue:
+              {
+                "keyH": "valueH",
+                "keyI": "valueI"
+              }
+            mergeRawValue: '{"keyJ": "valueJ","keyK": "valueK"}'
+```
+
+After we apply this policy, the config.json is modified as follows:
+
+1. Test: The operation checks if keyA has the value `valueA`. Since it does, the patch proceeds.
+2. Remove: Removes keyB and its value.
+3. Replace: Replaces the value of keyC with `newly added value`.
+4. Move: Moves the value of keyD to keyF, which effectively deletes keyD and sets the value of keyF to `valueD`.
+5. Copy: Copies the value of keyE to keyG.
+6. Merge: Adds new keys keyH and keyI with values `valueH` and `valueI`.
+7. Merge Raw Value: Adds keys keyJ and keyK with values `valueJ` and `valueK`.
+   Finally, we get a new config.json after these JSON operations are applied.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: example-configmap
+  namespace: default
+data:
+  config.json: |
+    {
+      "keyA": "valueA",
+      "keyC": "newly added value",
+      "keyE": "valueE",
+      "keyF": "",
+      "keyG": "valueE",
+      "keyH": "valueH",
+      "keyI": "valueI",
+      "keyJ": "valueJ",
+      "keyK": "valueK"
+    }
+```
\ No newline at end of file diff --git a/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml b/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml index 19edf33a0236..7d7f79ebb266 100644 --- a/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml +++ b/examples/customresourceinterpreter/apis/workload.example.io_workloads.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.5 name: workloads.workload.example.io spec: group: workload.example.io @@ -1102,9 +1102,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -1173,9 +1171,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -1215,9 +1211,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -1239,9 +1233,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret must @@ -1532,11 +1524,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC.
type: string required: @@ -1747,11 +1739,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -1900,11 +1892,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -2121,7 +2111,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -2203,11 +2192,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -2423,10 +2412,8 @@ spec: RecursiveReadOnly specifies whether read-only mounts should be handled recursively. - If ReadOnly is false, this field has no meaning and must be unspecified. - If ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this @@ -2434,11 +2421,9 @@ spec: supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason. - If this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None). - If this field is not specified, it is treated as an equivalent of Disabled. type: string subPath: @@ -2547,7 +2532,6 @@ spec: removed or restarted. The kubelet may evict a Pod if an ephemeral container causes the Pod to exceed its resource allocation. - To add an ephemeral container, use the ephemeralcontainers subresource of an existing Pod. Ephemeral containers may not be removed or restarted. properties: @@ -2620,9 +2604,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -2691,9 +2673,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string optional: description: Specify whether the Secret @@ -2733,9 +2713,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -2757,9 +2735,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret must @@ -3043,11 +3019,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -3246,11 +3222,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -3398,11 +3374,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -3607,7 +3581,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -3682,11 +3655,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -3824,7 +3797,6 @@ spec: The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container uses the namespaces configured in the Pod spec. - The container runtime must implement support for this feature. If the runtime does not support namespace targeting then the result of setting this field is undefined. type: string @@ -3912,10 +3884,8 @@ spec: RecursiveReadOnly specifies whether read-only mounts should be handled recursively. - If ReadOnly is false, this field has no meaning and must be unspecified. - If ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. 
If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this @@ -3923,11 +3893,9 @@ spec: supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason. - If this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None). - If this field is not specified, it is treated as an equivalent of Disabled. type: string subPath: @@ -4039,9 +4007,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -4137,9 +4103,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -4208,9 +4172,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret @@ -4250,9 +4212,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the ConfigMap @@ -4274,9 +4234,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: Specify whether the Secret must @@ -4567,11 +4525,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. 
type: string required: @@ -4782,11 +4740,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -4935,11 +4893,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry @@ -5156,7 +5112,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -5238,11 +5193,11 @@ spec: format: int32 type: integer service: + default: "" description: |- Service is the name of the service to place in the gRPC HealthCheckRequest (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - If this is not specified, the default behavior is defined by gRPC. type: string required: @@ -5458,10 +5413,8 @@ spec: RecursiveReadOnly specifies whether read-only mounts should be handled recursively. - If ReadOnly is false, this field has no meaning and must be unspecified. - If ReadOnly is true, and this field is set to Disabled, the mount is not made recursively read-only. If this field is set to IfPossible, the mount is made recursively read-only, if it is supported by the container runtime. If this @@ -5469,11 +5422,9 @@ spec: supported by the container runtime, otherwise the pod will not be started and an error will be generated to indicate the reason. - If this field is set to IfPossible or Enabled, MountPropagation must be set to None (or be unspecified, which defaults to None). - If this field is not specified, it is treated as an equivalent of Disabled. type: string subPath: @@ -5530,11 +5481,9 @@ spec: Specifies the OS of the containers in the pod. Some pod and container fields are restricted if this is set. - If the OS field is set to linux, the following fields must be unset: -securityContext.windowsOptions - If the OS field is set to windows, following fields must be unset: - spec.hostPID - spec.hostIPC @@ -5636,11 +5585,9 @@ spec: will be made available to those containers which consume them by name. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. items: description: |- @@ -5666,14 +5613,12 @@ spec: ResourceClaimTemplateName is the name of a ResourceClaimTemplate object in the same namespace as this pod. - The template will be used to create a new ResourceClaim, which will be bound to this pod. When this pod is deleted, the ResourceClaim will also be deleted. The pod name and resource name, along with a generated component, will be used to form a unique name for the ResourceClaim, which will be recorded in pod.status.resourceClaimStatuses. - This field is immutable and no changes will be made to the corresponding ResourceClaim by the control plane after creating the ResourceClaim. @@ -5712,7 +5657,6 @@ spec: If schedulingGates is not empty, the pod will stay in the SchedulingGated state and the scheduler will not attempt to schedule the pod. 
- SchedulingGates can only be set at pod creation time, and be removed only afterwards. items: description: PodSchedulingGate is associated to a Pod to @@ -5764,12 +5708,10 @@ spec: Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: - 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- - If unset, the Kubelet will not modify the ownership and permissions of any volume. Note that this field cannot be set when spec.os.name is windows. format: int64 @@ -5856,7 +5798,6 @@ spec: type indicates which kind of seccomp profile will be applied. Valid options are: - Localhost - a profile defined in a file on the node should be used. RuntimeDefault - the container runtime default profile should be used. Unconfined - no profile should be applied. @@ -6084,7 +6025,6 @@ spec: Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. - This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default). items: type: string @@ -6124,7 +6064,6 @@ spec: Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. - For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | @@ -6142,7 +6081,6 @@ spec: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. - If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -6154,7 +6092,6 @@ spec: has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. - If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag. type: string @@ -6222,7 +6159,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: description: |- @@ -6343,9 +6279,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6387,9 +6321,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6462,9 +6394,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether the ConfigMap @@ -6503,9 +6433,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -6646,7 +6574,6 @@ spec: The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed. - Use this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity @@ -6657,17 +6584,14 @@ spec: information on the connection between this volume type and PersistentVolumeClaim). - Use PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod. - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information. - A pod can use both types of ephemeral volumes and persistent volumes at the same time. properties: @@ -6681,7 +6605,6 @@ spec: entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long). - An existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until @@ -6691,11 +6614,9 @@ spec: this should not be necessary, but it may be useful when manually reconstructing a broken cluster. - This field is read-only and no changes will be made by Kubernetes to the PVC after it has been created. - Required, must not be nil. properties: metadata: @@ -6942,7 +6863,6 @@ spec: fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine type: string lun: description: 'lun is Optional: FC target lun number' @@ -7010,9 +6930,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7046,7 +6964,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: description: |- @@ -7127,9 +7044,6 @@ spec: used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. properties: path: description: |- @@ -7166,7 +7080,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine type: string initiatorName: description: |- @@ -7210,9 +7123,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7341,14 +7252,11 @@ spec: ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field of ClusterTrustBundle objects in an auto-updating file. - Alpha, gated by the ClusterTrustBundleProjection feature gate. - ClusterTrustBundle objects can either be selected by name, or by the combination of signer name and a label selector. - Kubelet performs aggressive normalization of the PEM contents written into the pod filesystem. Esoteric PEM features such as inter-block comments and block headers are stripped. Certificates are deduplicated. @@ -7483,9 +7391,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string optional: description: optional specify whether @@ -7627,9 +7533,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. 
type: string optional: description: optional field specify whether @@ -7719,7 +7623,6 @@ spec: Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine type: string image: description: |- @@ -7766,9 +7669,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7818,9 +7719,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -7943,9 +7842,7 @@ spec: This field is effectively required, but due to backwards compatibility is allowed to be empty. Instances of this type with an empty value here are almost certainly wrong. - TODO: Add other useful fields. apiVersion, kind, uid? More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Drop `kubebuilder:default` when controller-gen doesn't need it https://github.com/kubernetes-sigs/kubebuilder/issues/3896. type: string type: object x-kubernetes-map-type: atomic @@ -8010,16 +7907,8 @@ spec: conditions: description: Conditions is an array of current cluster conditions. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -8060,12 +7949,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml b/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml index 317065268ae0..66456f30073e 100644 --- a/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml +++ b/examples/customresourceinterpreter/karmada-interpreter-webhook-example.yaml @@ -25,32 +25,31 @@ spec: imagePullPolicy: IfNotPresent command: - /bin/karmada-interpreter-webhook-example - - --kubeconfig=/etc/kubeconfig + - --kubeconfig=/etc/karmada/config/karmada.config - --bind-address=0.0.0.0 - --secure-port=8445 - - --cert-dir=/var/serving-cert + - --cert-dir=/etc/karmada/pki/server - --v=4 ports: - containerPort: 8445 - volumeMounts: - - name: kubeconfig - subPath: kubeconfig - mountPath: /etc/kubeconfig - - name: cert - mountPath: /var/serving-cert - readOnly: true readinessProbe: httpGet: path: /readyz port: 8445 scheme: HTTPS + volumeMounts: + - name: karmada-config + mountPath: /etc/karmada/config + - name: server-cert + mountPath: /etc/karmada/pki/server + readOnly: true volumes: - - name: kubeconfig + - name: karmada-config secret: - secretName: kubeconfig - - name: cert + secretName: karmada-interpreter-webhook-example-config + - name: server-cert secret: - secretName: webhook-cert + secretName: karmada-interpreter-webhook-example-cert --- apiVersion: v1 kind: Service diff --git a/examples/customresourceinterpreter/webhook/app/webhook_test.go b/examples/customresourceinterpreter/webhook/app/webhook_test.go new file mode 100644 index 000000000000..99ac9ea7cb4e --- /dev/null +++ b/examples/customresourceinterpreter/webhook/app/webhook_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "context" + "testing" +) + +func TestNewWebhookCommand(t *testing.T) { + ctx := context.Background() + cmd := NewWebhookCommand(ctx) + + if cmd == nil { + t.Fatal("NewWebhookCommand returned nil") + } + + if cmd.Use != "karmada-interpreter-webhook-example" { + t.Errorf("Expected command use to be 'karmada-interpreter-webhook-example', got %s", cmd.Use) + } + + if cmd.Run == nil { + t.Error("Expected Run function to be set") + } + + flags := cmd.Flags() + expectedFlags := []string{"bind-address", "cert-dir", "secure-port"} + for _, flag := range expectedFlags { + if flags.Lookup(flag) == nil { + t.Errorf("Expected flag %s to be set", flag) + } + } +} diff --git a/examples/customresourceinterpreter/webhook/app/workloadwebhook_test.go b/examples/customresourceinterpreter/webhook/app/workloadwebhook_test.go new file mode 100644 index 000000000000..54ebd33daa1b --- /dev/null +++ b/examples/customresourceinterpreter/webhook/app/workloadwebhook_test.go @@ -0,0 +1,259 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package app + +import ( + "context" + "encoding/json" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + + workloadv1alpha1 "github.com/karmada-io/karmada/examples/customresourceinterpreter/apis/workload/v1alpha1" + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" + "github.com/karmada-io/karmada/pkg/webhook/interpreter" +) + +func TestWorkloadInterpreter_responseWithExploreReplica(t *testing.T) { + testCases := []struct { + name string + workload *workloadv1alpha1.Workload + expected int32 + }{ + { + name: "Workload with replicas", + workload: &workloadv1alpha1.Workload{ + Spec: workloadv1alpha1.WorkloadSpec{ + Replicas: ptr.To[int32](3), + }, + }, + expected: 3, + }, + { + name: "Workload without replicas", + workload: &workloadv1alpha1.Workload{ + Spec: workloadv1alpha1.WorkloadSpec{}, + }, + expected: 0, + }, + } + + interpreter := &workloadInterpreter{} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + response := interpreter.responseWithExploreReplica(tc.workload) + if response.Replicas != nil { + assert.Equal(t, tc.expected, *response.Replicas) + } else { + assert.Equal(t, tc.expected, int32(0)) + } + }) + } +} + +func TestWorkloadInterpreter_responseWithExploreDependency(t *testing.T) { + workload := &workloadv1alpha1.Workload{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + Spec: workloadv1alpha1.WorkloadSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-configmap", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + interpreter := &workloadInterpreter{} + response := interpreter.responseWithExploreDependency(workload) + + expectedDependencies := []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "test-namespace", + Name: "test-configmap", + }, + } + + assert.Equal(t, expectedDependencies, response.Dependencies) +} + +func TestWorkloadInterpreter_responseWithExploreInterpretHealth(t *testing.T) { + testCases := []struct { + name string + workload *workloadv1alpha1.Workload + expected bool + }{ + { + name: "Healthy workload", + workload: &workloadv1alpha1.Workload{ + Spec: workloadv1alpha1.WorkloadSpec{ + Replicas: ptr.To[int32](3), + }, + Status: workloadv1alpha1.WorkloadStatus{ + ReadyReplicas: 3, + }, + }, + expected: true, + }, + { + name: "Unhealthy workload", + workload: &workloadv1alpha1.Workload{ + Spec: workloadv1alpha1.WorkloadSpec{ + Replicas: ptr.To[int32](3), + }, + Status: workloadv1alpha1.WorkloadStatus{ + ReadyReplicas: 2, + }, + }, + expected: false, + }, + } + + interpreter := &workloadInterpreter{} + + for _, tc := range testCases { + t.Run(tc.name, func(t 
*testing.T) { + response := interpreter.responseWithExploreInterpretHealth(tc.workload) + assert.Equal(t, tc.expected, *response.Healthy) + }) + } +} + +func TestWorkloadInterpreter_responseWithExploreInterpretStatus(t *testing.T) { + workload := &workloadv1alpha1.Workload{ + Status: workloadv1alpha1.WorkloadStatus{ + ReadyReplicas: 3, + }, + } + + interpreter := &workloadInterpreter{} + response := interpreter.responseWithExploreInterpretStatus(workload) + + expectedStatus := &workloadv1alpha1.WorkloadStatus{ + ReadyReplicas: 3, + } + expectedBytes, _ := json.Marshal(expectedStatus) + + assert.Equal(t, expectedBytes, response.RawStatus.Raw) +} + +func TestWorkloadInterpreter_Handle(t *testing.T) { + testCases := []struct { + name string + operation configv1alpha1.InterpreterOperation + expectedStatus int32 + checkResponse func(*testing.T, interpreter.Response) + }{ + { + name: "InterpretReplica operation", + operation: configv1alpha1.InterpreterOperationInterpretReplica, + expectedStatus: http.StatusOK, + }, + { + name: "InterpretDependency operation", + operation: configv1alpha1.InterpreterOperationInterpretDependency, + expectedStatus: http.StatusOK, + }, + { + name: "InterpretHealth operation", + operation: configv1alpha1.InterpreterOperationInterpretHealth, + expectedStatus: http.StatusOK, + }, + { + name: "InterpretStatus operation", + operation: configv1alpha1.InterpreterOperationInterpretStatus, + expectedStatus: http.StatusOK, + }, + { + name: "Invalid operation", + operation: "InvalidOperation", + expectedStatus: http.StatusBadRequest, + }, + } + + scheme := runtime.NewScheme() + _ = workloadv1alpha1.Install(scheme) + decoder := interpreter.NewDecoder(scheme) + + interpreterInstance := &workloadInterpreter{ + decoder: decoder, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + workload := &workloadv1alpha1.Workload{ + Spec: workloadv1alpha1.WorkloadSpec{ + Replicas: ptr.To[int32](3), + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + EnvFrom: []corev1.EnvFromSource{ + { + ConfigMapRef: &corev1.ConfigMapEnvSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: "test-configmap", + }, + }, + }, + }, + }, + }, + }, + }, + }, + Status: workloadv1alpha1.WorkloadStatus{ + ReadyReplicas: 3, + }, + } + workloadBytes, _ := json.Marshal(workload) + + req := interpreter.Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + Operation: tc.operation, + Object: runtime.RawExtension{ + Raw: workloadBytes, + }, + }, + } + + response := interpreterInstance.Handle(context.Background(), req) + assert.Equal(t, tc.expectedStatus, response.Status.Code) + }) + } +} diff --git a/go.mod b/go.mod index 2383fcb638f1..550fe9d0a37e 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/karmada-io/karmada -go 1.22.6 // keep in sync with .go-version, Readme.md#Prerequisites, hack/util.sh +go 1.22.7 // keep in sync with .go-version, Readme.md#Prerequisites, hack/util.sh require ( github.com/adhocore/gronx v1.6.3 @@ -8,14 +8,15 @@ require ( github.com/emirpasic/gods v1.18.1 github.com/evanphx/json-patch/v5 v5.9.0 github.com/go-co-op/gocron v1.30.1 + github.com/go-openapi/jsonpointer v0.20.2 github.com/gogo/protobuf v1.3.2 github.com/google/go-cmp v0.6.0 github.com/google/uuid v1.5.0 github.com/kr/pretty v0.3.1 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f github.com/olekukonko/tablewriter v0.0.5 - github.com/onsi/ginkgo/v2 v2.17.1 - github.com/onsi/gomega v1.32.0 + 
github.com/onsi/ginkgo/v2 v2.17.2 + github.com/onsi/gomega v1.33.1 github.com/opensearch-project/opensearch-go v1.1.0 github.com/prometheus/client_golang v1.18.0 github.com/spf13/cobra v1.8.0 @@ -26,11 +27,11 @@ require ( github.com/vektra/mockery/v2 v2.10.0 github.com/yuin/gopher-lua v0.0.0-20220504180219-658193537a64 go.uber.org/mock v0.4.0 - golang.org/x/net v0.23.0 - golang.org/x/term v0.18.0 + golang.org/x/net v0.24.0 + golang.org/x/term v0.19.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - golang.org/x/tools v0.18.0 + golang.org/x/tools v0.20.0 gomodules.xyz/jsonpatch/v2 v2.4.0 google.golang.org/grpc v1.60.1 gopkg.in/yaml.v3 v3.0.1 @@ -47,15 +48,15 @@ require ( k8s.io/controller-manager v0.30.2 k8s.io/klog/v2 v2.120.1 k8s.io/kube-aggregator v0.30.2 - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 + k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f k8s.io/kubectl v0.30.2 k8s.io/metrics v0.30.2 - k8s.io/utils v0.0.0-20231127182322-b307cd553661 + k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf sigs.k8s.io/cluster-api v1.7.1 sigs.k8s.io/controller-runtime v0.18.4 - sigs.k8s.io/custom-metrics-apiserver v1.29.0 - sigs.k8s.io/kind v0.22.0 + sigs.k8s.io/custom-metrics-apiserver v1.30.0 + sigs.k8s.io/kind v0.24.0 sigs.k8s.io/mcs-api v0.1.0 sigs.k8s.io/metrics-server v0.7.1 sigs.k8s.io/structured-merge-diff/v4 v4.4.1 @@ -64,10 +65,10 @@ require ( require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/BurntSushi/toml v1.0.0 // indirect + github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect - github.com/alessio/shellescape v1.4.1 // indirect + github.com/alessio/shellescape v1.4.2 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -79,7 +80,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/camelcase v1.0.0 // indirect @@ -90,17 +91,16 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect - github.com/go-openapi/jsonpointer v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.4 // indirect github.com/go-openapi/swag v0.22.7 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.0.1 // indirect github.com/google/cel-go v0.17.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect + github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/shlex 
v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/gorilla/websocket v1.5.0 // indirect @@ -129,7 +129,7 @@ require ( github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/pelletier/go-toml v1.9.4 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect @@ -170,12 +170,12 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.22.0 // indirect golang.org/x/exp v0.0.0-20231226003508-02704c960a9b // indirect - golang.org/x/mod v0.15.0 // indirect + golang.org/x/mod v0.17.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sync v0.6.0 // indirect - golang.org/x/sys v0.18.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.19.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20231212172506-995d672761c0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20231212172506-995d672761c0 // indirect diff --git a/go.sum b/go.sum index 67d6cb6db3c6..7791a8d41aa1 100644 --- a/go.sum +++ b/go.sum @@ -65,8 +65,8 @@ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxB github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= -github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= +github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -88,8 +88,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alessio/shellescape v1.2.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= -github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= -github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alessio/shellescape v1.4.2 h1:MHPfaU+ddJ0/bYWpgIeUnQUqKrlJ1S7BfEYPM4uEoM0= +github.com/alessio/shellescape v1.4.2/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr/v4 
v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= @@ -190,8 +190,8 @@ github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+m github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.0 h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= +github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -212,7 +212,6 @@ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.0.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= @@ -307,8 +306,8 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -402,8 +401,8 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 
h1:pUa4ghanp6q4IJHwE9RwLgmVFfReJN+KbQ8ExNEUUoQ= -github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 h1:k7nVchz72niMH6YLQNvHSdIE7iqsQxK1P41mySCvssg= +github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= @@ -590,7 +589,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -601,15 +599,15 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.17.1 h1:V++EzdbhI4ZV4ev0UTIj0PzhzOcReJFyJaLjtSF55M8= -github.com/onsi/ginkgo/v2 v2.17.1/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs= +github.com/onsi/ginkgo/v2 v2.17.2 h1:7eMhcy3GimbsA3hEnVKdw/PQM9XN9krpKVXsZdph0/g= +github.com/onsi/ginkgo/v2 v2.17.2/go.mod h1:nP2DPOQoNsQmsVyv5rDA8JkXQoCs6goXIvr/PRJ1eCc= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk= -github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg= +github.com/onsi/gomega v1.33.1 h1:dsYjIxxSR755MDmKVsaFQTE22ChNBcuuTWgkUDSubOk= +github.com/onsi/gomega v1.33.1/go.mod h1:U4R44UsT+9eLIaYRB2a5qajjtQYn0hauxvRm16AVYg0= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opensearch-project/opensearch-go v1.1.0 h1:eG5sh3843bbU1itPRjA9QXbxcg8LaZ+DjEzQH9aLN3M= @@ -619,8 +617,9 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= 
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= -github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= @@ -721,7 +720,6 @@ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= -github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -891,8 +889,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= +golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -933,8 +931,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= -golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -992,8 +990,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= +golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1025,8 +1023,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1116,13 +1114,13 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= +golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= +golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text 
v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1207,8 +1205,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= +golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1381,7 +1379,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1477,16 +1474,16 @@ k8s.io/kube-aggregator v0.30.2 h1:0+yk/ED6foCprY8VmkDPUhngjaAPKsNTXB/UrtvbIz0= k8s.io/kube-aggregator v0.30.2/go.mod h1:EhqCfDdxysNWXk1wRL9SEHAdo1DKl6EULQagztkBcXE= k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= +k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f h1:0LQagt0gDpKqvIkAMPaRGcXawNMouPECM1+F9BVxEaM= +k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR+MboCuiDpVWkAifZvaYI1Q2ubgro= k8s.io/kubectl v0.30.2 h1:cgKNIvsOiufgcs4yjvgkK0+aPCfa8pUwzXdJtkbhsH8= k8s.io/kubectl v0.30.2/go.mod h1:rz7GHXaxwnigrqob0lJsiA07Df8RE3n1TSaC2CTeuB4= k8s.io/metrics v0.30.2 h1:zj4kIPTCfEbY0RHEogpA7QtlItU7xaO11+Gz1zVDxlc= k8s.io/metrics v0.30.2/go.mod h1:GpoO5XTy/g8CclVLtgA5WTrr2Cy5vCsqr5Xa/0ETWIk= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= 
-k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20240423183400-0849a56e8f22 h1:ao5hUqGhsqdm+bYbjH/pRkCs0unBGe9UyDahzs9zQzQ= +k8s.io/utils v0.0.0-20240423183400-0849a56e8f22/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf h1:rRz0YsF7VXj9fXRF6yQgFI7DzST+hsI3TeFSGupntu0= layeh.com/gopher-json v0.0.0-20201124131017-552bb3c4c3bf/go.mod h1:ivKkcY8Zxw5ba0jldhZCYYQfGdb2K6u9tbYK1AwMIBc= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -1501,13 +1498,13 @@ sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gE sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= -sigs.k8s.io/custom-metrics-apiserver v1.29.0 h1:uUoUjbPrE6nVBE82bo8siIkUDMsfbaSTBB6jAx/LJ9M= -sigs.k8s.io/custom-metrics-apiserver v1.29.0/go.mod h1:4XXz92s/SEmP3L2nlUu6lMWorxEQXAD39AdL22IQkDA= +sigs.k8s.io/custom-metrics-apiserver v1.30.0 h1:BCgg2QfInoWXvoJgPK8TxrSS9r5wR4NNvr7M+9sUOYo= +sigs.k8s.io/custom-metrics-apiserver v1.30.0/go.mod h1:QXOKIL83M545uITzoZn4OC1C7nr0WhLh70A38pbzUpk= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.8.1/go.mod h1:oNKTxUVPYkV9lWzY6CVMNluVq8cBsyq+UgPJdvA3uu4= -sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= -sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= +sigs.k8s.io/kind v0.24.0 h1:g4y4eu0qa+SCeKESLpESgMmVFBebL0BDa6f777OIWrg= +sigs.k8s.io/kind v0.24.0/go.mod h1:t7ueEpzPYJvHA8aeLtI52rtFftNgUYUaCwvxjk7phfw= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= @@ -1522,6 +1519,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/cli-testing-init-with-config.sh b/hack/cli-testing-init-with-config.sh new file mode 100755 index 000000000000..7de0db44fba9 --- /dev/null +++ b/hack/cli-testing-init-with-config.sh @@ -0,0 +1,155 @@ +#!/usr/bin/env bash +# Copyright 2024 The Karmada Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+# This script starts a local karmada control plane with karmadactl and with a certain number of clusters joined.
+# This script depends on utils in: ${REPO_ROOT}/hack/util.sh
+# 1. used by developers to set up a development environment quickly.
+# 2. used by e2e testing to set up the test environment automatically.
+
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source "${REPO_ROOT}"/hack/util.sh
+
+# variable definitions
+KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
+HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
+MEMBER_CLUSTER_1_NAME=${MEMBER_CLUSTER_1_NAME:-"config-member1"}
+MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"config-member2"}
+CLUSTER_VERSION=${CLUSTER_VERSION:-"${DEFAULT_CLUSTER_VERSION}"}
+BUILD_PATH=${BUILD_PATH:-"_output/bin/linux/amd64"}
+CONFIG_FILE_PATH=${CONFIG_FILE_PATH:-"/tmp/karmada-config.yaml"}
+
+# install kind and kubectl
+kind_version=v0.24.0
+echo -n "Preparing: 'kind' existence check - "
+if util::cmd_exist kind; then
+  echo "passed"
+else
+  echo "not pass"
+  util::install_tools "sigs.k8s.io/kind" $kind_version
+fi
+# get arch name and os name in bootstrap
+BS_ARCH=$(go env GOARCH)
+BS_OS=$(go env GOOS)
+# check arch and os name before installing
+util::install_environment_check "${BS_ARCH}" "${BS_OS}"
+echo -n "Preparing: 'kubectl' existence check - "
+if util::cmd_exist kubectl; then
+  echo "passed"
+else
+  echo "not pass"
+  util::install_kubectl "" "${BS_ARCH}" "${BS_OS}"
+fi
+
+# prepare the newest crds
+echo "Prepare the newest crds"
+cd charts/karmada/
+cp -r _crds crds
+tar -zcvf ../../crds.tar.gz crds
+cd -
+
+# make images
+export VERSION="latest"
+export REGISTRY="docker.io/karmada"
+make images GOOS="linux" --directory="${REPO_ROOT}"
+
+# make karmadactl binary
+make karmadactl
+
+# create host/member1/member2 clusters
+echo "Start creating clusters..."
+hack/create-cluster.sh ${HOST_CLUSTER_NAME} ${KUBECONFIG_PATH}/${HOST_CLUSTER_NAME}.config > /dev/null 2>&1 &
+hack/create-cluster.sh ${MEMBER_CLUSTER_1_NAME} ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config > /dev/null 2>&1 &
+hack/create-cluster.sh ${MEMBER_CLUSTER_2_NAME} ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config > /dev/null 2>&1 &
+
+# wait for clusters to be ready
+echo "Waiting for clusters to be ready..."
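+# The three create-cluster.sh invocations above run in the background, so
+# readiness is polled in stages per cluster below: wait for the kubeconfig
+# file to appear, wait for its context to be registered, then wait for all
+# nodes to report Ready and for their bootstrap taints to disappear.
+# Roughly, the same pattern for one extra cluster would look like this
+# (the cluster name "example" is purely a placeholder):
+#   util::wait_file_exist ${KUBECONFIG_PATH}/example.config 300
+#   util::wait_context_exist example ${KUBECONFIG_PATH}/example.config 300
+#   kubectl wait --for=condition=Ready nodes --all --timeout=800s \
+#     --kubeconfig=${KUBECONFIG_PATH}/example.config
+#   util::wait_nodes_taint_disappear 800 ${KUBECONFIG_PATH}/example.config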
+util::wait_file_exist ${KUBECONFIG_PATH}/${HOST_CLUSTER_NAME}.config 300
+util::wait_context_exist ${HOST_CLUSTER_NAME} ${KUBECONFIG_PATH}/${HOST_CLUSTER_NAME}.config 300
+kubectl wait --for=condition=Ready nodes --all --timeout=800s --kubeconfig=${KUBECONFIG_PATH}/${HOST_CLUSTER_NAME}.config
+util::wait_nodes_taint_disappear 800 ${KUBECONFIG_PATH}/${HOST_CLUSTER_NAME}.config
+
+util::wait_file_exist ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config 300
+util::wait_context_exist "${MEMBER_CLUSTER_1_NAME}" ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config 300
+kubectl wait --for=condition=Ready nodes --all --timeout=800s --kubeconfig=${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config
+util::wait_nodes_taint_disappear 800 ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config
+
+util::wait_file_exist ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config 300
+util::wait_context_exist "${MEMBER_CLUSTER_2_NAME}" ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config 300
+kubectl wait --for=condition=Ready nodes --all --timeout=800s --kubeconfig=${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config
+util::wait_nodes_taint_disappear 800 ${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config
+
+# load component images into the kind cluster
+kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}"
+kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}"
+kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}"
+kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}"
+
+# Ensure the parent directory of CONFIG_FILE_PATH exists
+CONFIG_DIR=$(dirname "${CONFIG_FILE_PATH}")
+if [ ! -d "${CONFIG_DIR}" ]; then
+  echo "Creating directory ${CONFIG_DIR}..."
+  mkdir -p "${CONFIG_DIR}"
+fi
+
+# build Karmada init configuration file
+CONFIG_TEMPLATE=$(cat < ${CONFIG_FILE_PATH}
+
+echo "Karmada init config file generated at ${CONFIG_FILE_PATH}"
+
+# init Karmada control plane
+echo "Start initializing karmada control plane..."
+${BUILD_PATH}/karmadactl init --config=${CONFIG_FILE_PATH}
+
+# join clusters
+echo "Join member clusters..."
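+# The joins below use push mode: 'karmadactl join' registers the member
+# cluster with the Karmada control plane reached via --kubeconfig, while
+# --cluster-kubeconfig points at the member cluster being joined. Generic
+# shape, with every <...> value a placeholder:
+#   karmadactl --kubeconfig <karmada-apiserver.config> join <cluster-name> \
+#     --cluster-kubeconfig=<member.config>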
+${BUILD_PATH}/karmadactl --kubeconfig ${HOME}/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_1_NAME} --cluster-kubeconfig=${KUBECONFIG_PATH}/${MEMBER_CLUSTER_1_NAME}.config
+${BUILD_PATH}/karmadactl --kubeconfig ${HOME}/karmada/karmada-apiserver.config join ${MEMBER_CLUSTER_2_NAME} --cluster-kubeconfig=${KUBECONFIG_PATH}/${MEMBER_CLUSTER_2_NAME}.config
+kubectl wait --for=condition=Ready clusters --all --timeout=800s --kubeconfig=${HOME}/karmada/karmada-apiserver.config
diff --git a/hack/deploy-karmada-agent.sh b/hack/deploy-karmada-agent.sh
index deb6cb55fb8c..fdc36f30eed9 100755
--- a/hack/deploy-karmada-agent.sh
+++ b/hack/deploy-karmada-agent.sh
@@ -83,7 +83,7 @@ kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agen
 kubectl --context="${MEMBER_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/agent/clusterrolebinding.yaml"
 
 # create secret
-kubectl --context="${MEMBER_CLUSTER_NAME}" create secret generic karmada-kubeconfig --from-file=karmada-kubeconfig="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}"
+kubectl --context="${MEMBER_CLUSTER_NAME}" create secret generic karmada-agent-config --from-file=karmada.config="${KARMADA_APISERVER_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}"
 
 # extract api endpoint of member cluster
 MEMBER_CLUSTER=$(kubectl config view -o jsonpath='{.contexts[?(@.name == "'${MEMBER_CLUSTER_NAME}'")].context.cluster}')
diff --git a/hack/deploy-karmada-by-operator.sh b/hack/deploy-karmada-by-operator.sh
new file mode 100755
index 000000000000..b6858f809345
--- /dev/null
+++ b/hack/deploy-karmada-by-operator.sh
@@ -0,0 +1,123 @@
+#!/usr/bin/env bash
+# Copyright 2024 The Karmada Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o errexit
+set -o nounset
+
+# This script deploys a karmada instance to any cluster you want via karmada-operator.
+# This script depends on utils in: ${REPO_ROOT}/hack/util.sh
+
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source ${REPO_ROOT}/hack/util.sh
+
+KARMADA_SYSTEM_NAMESPACE="karmada-system"
+KARMADA_INSTANCE_NAME=${KARMADA_INSTANCE_NAME:-"karmada-demo"}
+KARMADA_INSTANCE_NAMESPACE=${KARMADA_INSTANCE_NAMESPACE:-"test"}
+
+CERT_DIR=${CERT_DIR:-"${HOME}/.karmada"}
+mkdir -p "${CERT_DIR}" &>/dev/null || mkdir -p "${CERT_DIR}"
+rm -f "${CERT_DIR}"/* &>/dev/null || rm -f "${CERT_DIR}"/*
+
+function usage() {
+  echo "This script deploys a karmada instance to a given cluster via karmada-operator."
+  echo "Note: This script is an internal script and is not intended to be used by end-users."
+ echo "Usage: hack/deploy-karmada-by-operator.sh " + echo "Example: hack/deploy-karmada-by-operator.sh ~/.kube/members.config member1 karmada-apiserver v1.11.0 true https://github.com/karmada-io/karmada/releases/download/v1.11.0/crds.tar.gz" + echo -e "Parameters:\n\tKUBECONFIG\t\tYour cluster's kubeconfig that you want to install to" + echo -e "\tCONTEXT_NAME\t\tThe name of context in 'kubeconfig'" + echo -e "\tKARMADA_CONTEXT_NAME\t\tThe context name of karmada instance, and different Karmada instances must have unique contexts to avoid being overwritten.'" + echo -e "\tKARMADA_IMAGE_TAG\t\tThe tag of image'" + echo -e "\tADDON_NEEDED\t\tWhether you need to install addons(KarmadaSearch&karmadaDescheduler), optional, defaults to false." + echo -e "\tCRD_DOWNLOAD_URL\t\tThe download url for CRDs, optional.'" +} + +if [[ $# -le 4 ]]; then + usage + exit 1 +fi + +# check config file existence +HOST_CLUSTER_KUBECONFIG=$1 +if [[ ! -f "${HOST_CLUSTER_KUBECONFIG}" ]]; then + echo -e "ERROR: failed to get kubernetes config file: '${HOST_CLUSTER_KUBECONFIG}', not existed.\n" + usage + exit 1 +fi + +# check context existence +CONTEXT_NAME=$2 +if ! kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" config get-contexts "${CONTEXT_NAME}" > /dev/null 2>&1; +then + echo -e "ERROR: failed to get context: '${CONTEXT_NAME}' not in ${HOST_CLUSTER_KUBECONFIG}. \n" + usage + exit 1 +fi + +# check for duplicate karmada context name. +KARMADA_CONTEXT_NAME=$3 +if kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" config get-contexts "${KARMADA_CONTEXT_NAME}" > /dev/null 2>&1; +then + echo -e "ERROR: context: '${KARMADA_CONTEXT_NAME}' already exists in ${HOST_CLUSTER_KUBECONFIG}. \n" + usage + exit 1 +fi + +TEMP_PATH_BOOTSTRAP=$(mktemp -d) +trap '{ rm -rf ${TEMP_PATH_BOOTSTRAP}; }' EXIT +cp -rf "${REPO_ROOT}"/operator/config/samples/karmada-sample.yaml "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml + +if kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${CONTEXT_NAME}" get namespace ${KARMADA_INSTANCE_NAMESPACE} > /dev/null 2>&1; then + echo "Namespace '${KARMADA_INSTANCE_NAMESPACE}' already exists." +else + echo "Namespace '${KARMADA_INSTANCE_NAMESPACE}' does not exist. Creating now..." + kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${CONTEXT_NAME}" create ns ${KARMADA_INSTANCE_NAMESPACE} +fi + +# modify `karmada-sample.yaml` based on custom configuration. 
+ADDON_NEEDED=${5:-false}
+# if installing addons, append KarmadaSearch and KarmadaDescheduler to 'karmada-sample.yaml'
+if [ "${ADDON_NEEDED}" = "true" ]; then
+  echo -e '  karmadaDescheduler:\n    imageRepository: docker.io/karmada/karmada-descheduler\n    imageTag: {{image_tag}}\n    replicas: 1' >> "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+  echo -e '  karmadaSearch:\n    imageRepository: docker.io/karmada/karmada-search\n    imageTag: {{image_tag}}\n    replicas: 1' >> "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+fi
+
+IMAGE_TAG=$4
+sed -i'' -e "s/{{image_tag}}/${IMAGE_TAG}/g" "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+sed -i'' -e "s/{{karmada_instance_name}}/${KARMADA_INSTANCE_NAME}/g" "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+sed -i'' -e "s/{{karmada_instance_namespace}}/${KARMADA_INSTANCE_NAMESPACE}/g" "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+
+CRD_DOWNLOAD_URL=${6:-""}
+if [[ -z ${CRD_DOWNLOAD_URL} ]]; then
+  sed -i'' -e "s/{{crd_tarball}}//g" "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+else
+  CRD_TAR_BALL="\n  httpSource:\n    url: ${CRD_DOWNLOAD_URL}"
+  awk -v pattern="{{crd_tarball}}" -v replacement="${CRD_TAR_BALL}" '{ gsub(pattern, replacement); print }' "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml > "${TEMP_PATH_BOOTSTRAP}"/temp && mv "${TEMP_PATH_BOOTSTRAP}"/temp "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+fi
+
+# create and wait for karmada instance to be ready
+kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${TEMP_PATH_BOOTSTRAP}"/karmada-sample-tmp.yaml
+kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${CONTEXT_NAME}" wait --for=condition=Ready --timeout=1000s karmada ${KARMADA_INSTANCE_NAME} -n ${KARMADA_INSTANCE_NAMESPACE}
+
+# generate kubeconfig for karmada instance
+kubectl --kubeconfig="${HOST_CLUSTER_KUBECONFIG}" --context="${CONTEXT_NAME}" get secret -n ${KARMADA_INSTANCE_NAMESPACE} ${KARMADA_INSTANCE_NAME}-admin-config -o jsonpath={.data.kubeconfig} | base64 -d > ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config
+cat ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config | grep "certificate-authority-data" | awk '{print $2}' | base64 -d > ${CERT_DIR}/ca.crt
+cat ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config | grep "client-certificate-data" | awk '{print $2}' | base64 -d > ${CERT_DIR}/karmada.crt
+cat ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config | grep "client-key-data" | awk '{print $2}' | base64 -d > ${CERT_DIR}/karmada.key
+KARMADA_APISERVER=$(cat ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config | grep "server:" | awk '{print $2}')
+
+# write karmada api server config to kubeconfig file
+util::append_client_kubeconfig "${HOST_CLUSTER_KUBECONFIG}" "${CERT_DIR}/ca.crt" "${CERT_DIR}/karmada.crt" "${CERT_DIR}/karmada.key" "${KARMADA_APISERVER}" ${KARMADA_CONTEXT_NAME}
+rm ~/.kube/${KARMADA_INSTANCE_NAME}-${KARMADA_INSTANCE_NAMESPACE}-tmp-apiserver.config
diff --git a/hack/deploy-karmada-operator.sh b/hack/deploy-karmada-operator.sh
new file mode 100755
index 000000000000..d49c149744d4
--- /dev/null
+++ b/hack/deploy-karmada-operator.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+# Copyright 2024 The Karmada Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+set -o errexit
+set -o nounset
+
+REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+source ${REPO_ROOT}/hack/util.sh
+KARMADA_SYSTEM_NAMESPACE="karmada-system"
+
+function usage() {
+  echo "This script will deploy karmada-operator on the specified cluster."
+  echo "Usage: hack/deploy-karmada-operator.sh <KUBECONFIG> <CONTEXT_NAME>"
+  echo "Example: hack/deploy-karmada-operator.sh ~/.kube/config karmada-host"
+}
+
+if [[ $# -ne 2 ]]; then
+  usage
+  exit 1
+fi
+
+# check kube config file existence
+if [[ ! -f "${1}" ]]; then
+  echo -e "ERROR: failed to get kubernetes config file: '${1}' does not exist.\n"
+  usage
+  exit 1
+fi
+KUBECONFIG=$1
+
+# check context existence
+if ! kubectl config get-contexts "${2}" --kubeconfig="${KUBECONFIG}" > /dev/null 2>&1;
+then
+  echo -e "ERROR: failed to get context: '${2}' not in ${KUBECONFIG}.\n"
+  usage
+  exit 1
+fi
+CONTEXT_NAME=$2
+
+# make images
+export VERSION="latest"
+export REGISTRY="docker.io/karmada"
+make image-karmada-operator GOOS="linux" --directory=.
+
+# load the karmada-operator image
+kind load docker-image "${REGISTRY}/karmada-operator:${VERSION}" --name="${CONTEXT_NAME}"
+
+# create namespace `karmada-system`
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml"
+
+# install Karmada operator crds
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f operator/config/crds/
+
+# deploy karmada-operator
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/operator/config/deploy/karmada-operator-clusterrole.yaml"
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/operator/config/deploy/karmada-operator-clusterrolebinding.yaml"
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/operator/config/deploy/karmada-operator-serviceaccount.yaml"
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" apply -f "${REPO_ROOT}/operator/config/deploy/karmada-operator-deployment.yaml"
+
+# wait for karmada-operator to be ready
+kubectl --kubeconfig="${KUBECONFIG}" --context="${CONTEXT_NAME}" wait --for=condition=Ready --timeout=30s pods -l karmada-app=karmada-operator -n ${KARMADA_SYSTEM_NAMESPACE}
diff --git a/hack/deploy-karmada.sh b/hack/deploy-karmada.sh
index 9aa39949f4aa..cd411cb917e9 100755
--- a/hack/deploy-karmada.sh
+++ b/hack/deploy-karmada.sh
@@ -86,7 +86,7 @@ fi
 HOST_CLUSTER_TYPE=${3:-"local"} # the default of host cluster type is local, i.e. cluster created by kind.
 
 # generate a secret to store the certificates
-function generate_cert_secret {
+function generate_cert_related_secrets {
     local karmada_ca
     local karmada_ca_key
     karmada_ca=$(base64 < "${ROOT_CA_FILE}" | tr -d '\r\n')
@@ -94,41 +94,69 @@ function generate_cert_secret {
     local TEMP_PATH
     TEMP_PATH=$(mktemp -d)
+    echo ${TEMP_PATH}
+
+    # 1.
generate secret with secret cert + generate_cert_secret karmada-apiserver ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret karmada-aggregated-apiserver ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret karmada-metrics-adapter ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret karmada-search ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret karmada-webhook ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret karmada-interpreter-webhook-example ${karmada_ca} ${SERVER_CRT} ${SERVER_KEY} + generate_cert_secret etcd ${karmada_ca} ${ETCD_SERVER_CRT} ${ETCD_SERVER_KEY} + + # 2. generate secret with client cert + generate_cert_secret karmada-apiserver-etcd-client ${karmada_ca} ${ETCD_CLIENT_CRT} ${ETCD_CLIENT_KEY} + generate_cert_secret karmada-apiserver-front-proxy-client ${karmada_ca} ${FRONT_PROXY_CLIENT_CRT} ${FRONT_PROXY_CLIENT_KEY} + generate_cert_secret karmada-aggregated-apiserver-etcd-client ${karmada_ca} ${ETCD_CLIENT_CRT} ${ETCD_CLIENT_KEY} + generate_cert_secret karmada-search-etcd-client ${karmada_ca} ${ETCD_CLIENT_CRT} ${ETCD_CLIENT_KEY} + generate_cert_secret etcd-etcd-client ${karmada_ca} ${ETCD_CLIENT_CRT} ${ETCD_CLIENT_KEY} + generate_cert_secret karmada-scheduler-scheduler-estimator-client ${karmada_ca} ${CLIENT_CRT} ${CLIENT_KEY} + generate_cert_secret karmada-descheduler-scheduler-estimator-client ${karmada_ca} ${CLIENT_CRT} ${CLIENT_KEY} + + # 3. generate secret with ca cert or sa key + generate_ca_cert_secret kube-controller-manager ${karmada_ca} ${karmada_ca_key} + generate_key_pair_secret kube-controller-manager ${SA_PUB} ${SA_KEY} + generate_key_pair_secret karmada-apiserver ${SA_PUB} ${SA_KEY} + + # 4. generate secret with karmada config + components=(karmada-aggregated-apiserver karmada-controller-manager kube-controller-manager karmada-scheduler karmada-descheduler karmada-metrics-adapter karmada-search karmada-webhook karmada-interpreter-webhook-example) + for component in "${components[@]}" + do + generate_config_secret ${component} ${karmada_ca} ${CLIENT_CRT} ${CLIENT_KEY} + done - cp -rf "${REPO_ROOT}"/artifacts/deploy/karmada-cert-secret.yaml "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - cp -rf "${REPO_ROOT}"/artifacts/deploy/secret.yaml "${TEMP_PATH}"/secret-tmp.yaml - cp -rf "${REPO_ROOT}"/artifacts/deploy/karmada-webhook-cert-secret.yaml "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml - - sed -i'' -e "s/{{ca_crt}}/${karmada_ca}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{ca_key}}/${karmada_ca_key}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{client_crt}}/${KARMADA_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{client_key}}/${KARMADA_KEY}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{apiserver_crt}}/${KARMADA_APISERVER_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{apiserver_key}}/${KARMADA_APISERVER_KEY}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - - sed -i'' -e "s/{{front_proxy_ca_crt}}/${FRONT_PROXY_CA_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{front_proxy_client_crt}}/${FRONT_PROXY_CLIENT_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{front_proxy_client_key}}/${FRONT_PROXY_CLIENT_KEY}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - - sed -i'' -e "s/{{etcd_ca_crt}}/${ETCD_CA_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{etcd_server_crt}}/${ETCD_SERVER_CRT}/g" 
"${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{etcd_server_key}}/${ETCD_SERVER_KEY}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{etcd_client_crt}}/${ETCD_CLIENT_CRT}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - sed -i'' -e "s/{{etcd_client_key}}/${ETCD_CLIENT_KEY}/g" "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - - sed -i'' -e "s/{{ca_crt}}/${karmada_ca}/g" "${TEMP_PATH}"/secret-tmp.yaml - sed -i'' -e "s/{{client_crt}}/${KARMADA_CRT}/g" "${TEMP_PATH}"/secret-tmp.yaml - sed -i'' -e "s/{{client_key}}/${KARMADA_KEY}/g" "${TEMP_PATH}"/secret-tmp.yaml - - sed -i'' -e "s/{{server_key}}/${KARMADA_KEY}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml - sed -i'' -e "s/{{server_certificate}}/${KARMADA_CRT}/g" "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml - - kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-cert-secret-tmp.yaml - kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/secret-tmp.yaml - kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/karmada-webhook-cert-secret-tmp.yaml rm -rf "${TEMP_PATH}" } +function generate_config_secret() { + export component=$1 ca_crt=$2 client_crt=$3 client_key=$4 + envsubst < "${REPO_ROOT}"/artifacts/deploy/karmada-config-secret.yaml > "${TEMP_PATH}"/${component}-config-secret.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/${component}-config-secret.yaml + unset component ca_crt client_crt client_key +} + +function generate_cert_secret() { + export name=$1 ca_crt=$2 tls_crt=$3 tls_key=$4 + envsubst < "${REPO_ROOT}"/artifacts/deploy/karmada-cert-secret.yaml > "${TEMP_PATH}"/${name}-cert-secret.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/${name}-cert-secret.yaml + unset name ca_crt tls_crt tls_key +} + +function generate_ca_cert_secret() { + export component=$1 ca_crt=$2 ca_key=$3 + envsubst < "${REPO_ROOT}"/artifacts/deploy/karmada-ca-cert-secret.yaml > "${TEMP_PATH}"/${component}-ca-cert-secret.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/${component}-ca-cert-secret.yaml + unset component ca_crt ca_key +} + +function generate_key_pair_secret() { + export component=$1 sa_pub=$2 sa_key=$3 + envsubst < "${REPO_ROOT}"/artifacts/deploy/karmada-key-pair-secret.yaml > "${TEMP_PATH}"/${component}-key-pair-secret.yaml + kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${TEMP_PATH}"/${component}-key-pair-secret.yaml + unset component sa_pub sa_key +} + # install Karmada's APIs function installCRDs() { local context_name=$1 @@ -149,31 +177,31 @@ util::cmd_must_exist "openssl" util::cmd_must_exist_cfssl ${CFSSL_VERSION} # create CA signers util::create_signing_certkey "" "${CERT_DIR}" ca karmada '"client auth","server auth"' -util::create_signing_certkey "" "${CERT_DIR}" front-proxy-ca front-proxy-ca '"client auth","server auth"' -util::create_signing_certkey "" "${CERT_DIR}" etcd-ca etcd-ca '"client auth","server auth"' # signs a certificate -util::create_certkey "" "${CERT_DIR}" "ca" karmada system:admin "system:masters" kubernetes.default.svc "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" "${interpreter_webhook_example_service_external_ip_address}" -util::create_certkey "" "${CERT_DIR}" "ca" apiserver karmada-apiserver "" "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" $(util::get_apiserver_ip_from_kubeconfig "${HOST_CLUSTER_NAME}") 
-util::create_certkey "" "${CERT_DIR}" "front-proxy-ca" front-proxy-client front-proxy-client "" kubernetes.default.svc "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" -util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-server etcd-server "" kubernetes.default.svc "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" -util::create_certkey "" "${CERT_DIR}" "etcd-ca" etcd-client etcd-client "" "*.etcd.karmada-system.svc.cluster.local" "*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" +karmadaAltNames=("*.karmada-system.svc.cluster.local" "*.karmada-system.svc" "localhost" "127.0.0.1" $(util::get_apiserver_ip_from_kubeconfig "${HOST_CLUSTER_NAME}") "${interpreter_webhook_example_service_external_ip_address}") +util::create_certkey "" "${CERT_DIR}" "ca" server server "" "${karmadaAltNames[@]}" +util::create_certkey "" "${CERT_DIR}" "ca" client system:admin system:masters "${karmadaAltNames[@]}" +util::create_certkey "" "${CERT_DIR}" "ca" front-proxy-client front-proxy-client "" "${karmadaAltNames[@]}" +util::create_certkey "" "${CERT_DIR}" "ca" etcd-server etcd-server "" "${karmadaAltNames[@]}" +util::create_certkey "" "${CERT_DIR}" "ca" etcd-client etcd-client "" "${karmadaAltNames[@]}" +util::create_key_pair "" "${CERT_DIR}" "sa" # create namespace for control plane components kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/namespace.yaml" -KARMADA_CRT=$(base64 < "${CERT_DIR}/karmada.crt" | tr -d '\r\n') -KARMADA_KEY=$(base64 < "${CERT_DIR}/karmada.key" | tr -d '\r\n') -KARMADA_APISERVER_CRT=$(base64 < "${CERT_DIR}/apiserver.crt" | tr -d '\r\n') -KARMADA_APISERVER_KEY=$(base64 < "${CERT_DIR}/apiserver.key" | tr -d '\r\n') -FRONT_PROXY_CA_CRT=$(base64 < "${CERT_DIR}/front-proxy-ca.crt" | tr -d '\r\n') +SERVER_CRT=$(base64 < "${CERT_DIR}/server.crt" | tr -d '\r\n') +SERVER_KEY=$(base64 < "${CERT_DIR}/server.key" | tr -d '\r\n') +CLIENT_CRT=$(base64 < "${CERT_DIR}/client.crt" | tr -d '\r\n') +CLIENT_KEY=$(base64 < "${CERT_DIR}/client.key" | tr -d '\r\n') FRONT_PROXY_CLIENT_CRT=$(base64 < "${CERT_DIR}/front-proxy-client.crt" | tr -d '\r\n') FRONT_PROXY_CLIENT_KEY=$(base64 < "${CERT_DIR}/front-proxy-client.key" | tr -d '\r\n') -ETCD_CA_CRT=$(base64 < "${CERT_DIR}/etcd-ca.crt" | tr -d '\r\n') ETCD_SERVER_CRT=$(base64 < "${CERT_DIR}/etcd-server.crt" | tr -d '\r\n') ETCD_SERVER_KEY=$(base64 < "${CERT_DIR}/etcd-server.key" | tr -d '\r\n') ETCD_CLIENT_CRT=$(base64 < "${CERT_DIR}/etcd-client.crt" | tr -d '\r\n') ETCD_CLIENT_KEY=$(base64 < "${CERT_DIR}/etcd-client.key" | tr -d '\r\n') -generate_cert_secret +SA_PUB=$(base64 < "${CERT_DIR}/sa.pub" | tr -d '\r\n') +SA_KEY=$(base64 < "${CERT_DIR}/sa.key" | tr -d '\r\n') +generate_cert_related_secrets # deploy karmada etcd kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/karmada-etcd.yaml" @@ -200,7 +228,7 @@ fi # deploy karmada apiserver TEMP_PATH_APISERVER=$(mktemp -d) trap '{ rm -rf ${TEMP_PATH_APISERVER}; }' EXIT -KARMADA_APISERVER_VERSION=${KARMADA_APISERVER_VERSION:-"v1.29.6"} +KARMADA_APISERVER_VERSION=${KARMADA_APISERVER_VERSION:-"v1.30.4"} cp "${REPO_ROOT}"/artifacts/deploy/karmada-apiserver.yaml "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml sed -i'' -e "s/{{service_type}}/${KARMADA_APISERVER_SERVICE_TYPE}/g" "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml sed -i'' -e 
"s/{{karmada_apiserver_version}}/${KARMADA_APISERVER_VERSION}/g" "${TEMP_PATH_APISERVER}"/karmada-apiserver.yaml @@ -237,7 +265,7 @@ else fi # write karmada api server config to kubeconfig file -util::append_client_kubeconfig "${HOST_CLUSTER_KUBECONFIG}" "${CERT_DIR}/karmada.crt" "${CERT_DIR}/karmada.key" "${KARMADA_APISERVER_IP}" "${KARMADA_APISERVER_SECURE_PORT}" karmada-apiserver +util::append_client_kubeconfig "${HOST_CLUSTER_KUBECONFIG}" "${ROOT_CA_FILE}" "${CERT_DIR}/client.crt" "${CERT_DIR}/client.key" "https://${KARMADA_APISERVER_IP}:${KARMADA_APISERVER_SECURE_PORT}" karmada-apiserver # deploy kube controller manager cp "${REPO_ROOT}"/artifacts/deploy/kube-controller-manager.yaml "${TEMP_PATH_APISERVER}"/kube-controller-manager.yaml diff --git a/hack/local-up-karmada-by-operator.sh b/hack/local-up-karmada-by-operator.sh new file mode 100755 index 000000000000..e2fc56cafefc --- /dev/null +++ b/hack/local-up-karmada-by-operator.sh @@ -0,0 +1,223 @@ +#!/usr/bin/env bash +# Copyright 2024 The Karmada Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +# This script is used in workflow to validate karmada installation by operator. +# It starts a local karmada control plane based on current codebase via karmada-operator and with a certain number of clusters joined. +# This script depends on utils in: ${REPO_ROOT}/hack/util.sh +# 1. used by developer to setup develop environment quickly. +# 2. used by e2e testing to test if the operator installs correctly. + +function usage() { + echo "Usage:" + echo " hack/local-up-karmada-by-operator.sh [-h]" + echo " h: print help information" +} + +function getCrdsDir() { + local path=$1 + local url=$2 + local key=$(echo "$url" | xargs) # Trim whitespace using xargs + local hash=$(echo -n "$key" | sha256sum | awk '{print $1}') # Calculate SHA256 hash + local hashedKey=${hash:0:64} # Take the first 64 characters of the hash + echo "${path}/cache/${hashedKey}" +} + +while getopts 'h' OPT; do + case $OPT in + h) + usage + exit 0 + ;; + ?) + usage + exit 1 + ;; + esac +done + +REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 
+source "${REPO_ROOT}"/hack/util.sh + +# variable define +KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"} +MAIN_KUBECONFIG=${MAIN_KUBECONFIG:-"${KUBECONFIG_PATH}/karmada.config"} +HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"} +KARMADA_APISERVER_CLUSTER_NAME=${KARMADA_APISERVER_CLUSTER_NAME:-"karmada-apiserver"} +MEMBER_CLUSTER_KUBECONFIG=${MEMBER_CLUSTER_KUBECONFIG:-"${KUBECONFIG_PATH}/members.config"} +MEMBER_CLUSTER_1_NAME=${MEMBER_CLUSTER_1_NAME:-"member1"} +MEMBER_CLUSTER_2_NAME=${MEMBER_CLUSTER_2_NAME:-"member2"} +PULL_MODE_CLUSTER_NAME=${PULL_MODE_CLUSTER_NAME:-"member3"} +MEMBER_TMP_CONFIG_PREFIX="member-tmp" +MEMBER_CLUSTER_1_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${MEMBER_CLUSTER_1_NAME}.config" +MEMBER_CLUSTER_2_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${MEMBER_CLUSTER_2_NAME}.config" +PULL_MODE_CLUSTER_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${PULL_MODE_CLUSTER_NAME}.config" +CLUSTER_VERSION=${CLUSTER_VERSION:-"${DEFAULT_CLUSTER_VERSION}"} +KIND_LOG_FILE=${KIND_LOG_FILE:-"/tmp/karmada"} +KARMADA_SYSTEM_NAMESPACE="karmada-system" +KARMADA_INSTANCE_NAME=${KARMADA_INSTANCE_NAME:-"karmada-demo"} +KARMADA_INSTANCE_NAMESPACE=${KARMADA_INSTANCE_NAMESPACE:-"test"} + +#step0: prepare +# proxy setting in China mainland +if [[ -n ${CHINA_MAINLAND:-} ]]; then + util::set_mirror_registry_for_china_mainland ${REPO_ROOT} +fi + +# make sure go exists and the go version is a viable version. +util::cmd_must_exist "go" +util::verify_go_version + +# make sure docker exists +util::cmd_must_exist "docker" + +# install kind and kubectl +kind_version=v0.24.0 +echo -n "Preparing: 'kind' existence check - " +if util::cmd_exist kind; then + echo "passed" +else + echo "not pass" + util::install_tools "sigs.k8s.io/kind" $kind_version +fi + +# get arch name and os name in bootstrap +BS_ARCH=$(go env GOARCH) +BS_OS=$(go env GOOS) +# check arch and os name before installing +util::install_environment_check "${BS_ARCH}" "${BS_OS}" +echo -n "Preparing: 'kubectl' existence check - " +if util::cmd_exist kubectl; then + echo "passed" +else + echo "not pass" + util::install_kubectl "" "${BS_ARCH}" "${BS_OS}" +fi + +#step1. create host cluster and member clusters in parallel +#prepare for kindClusterConfig +util::delete_necessary_resources "${MAIN_KUBECONFIG},${MEMBER_CLUSTER_KUBECONFIG}" "${HOST_CLUSTER_NAME},${MEMBER_CLUSTER_1_NAME},${MEMBER_CLUSTER_2_NAME},${PULL_MODE_CLUSTER_NAME}" "${KIND_LOG_FILE}" + +util::create_cluster "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${CLUSTER_VERSION}" "${KIND_LOG_FILE}" +util::create_cluster "${MEMBER_CLUSTER_1_NAME}" "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${CLUSTER_VERSION}" "${KIND_LOG_FILE}" "${REPO_ROOT}"/artifacts/kindClusterConfig/member1.yaml +util::create_cluster "${MEMBER_CLUSTER_2_NAME}" "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${CLUSTER_VERSION}" "${KIND_LOG_FILE}" "${REPO_ROOT}"/artifacts/kindClusterConfig/member2.yaml +util::create_cluster "${PULL_MODE_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${CLUSTER_VERSION}" "${KIND_LOG_FILE}" "${REPO_ROOT}"/artifacts/kindClusterConfig/member3.yaml + +#step2. 
make images and get karmadactl +export VERSION="latest" +export REGISTRY="docker.io/karmada" +export KARMADA_IMAGE_LABEL_VALUE="May_be_pruned_in_local-up-karmada_by_operator.sh" +export DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS:-} --label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" +make images GOOS="linux" --directory="${REPO_ROOT}" +#clean up dangling images +docker image prune --force --filter "label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" + +GO111MODULE=on go install "github.com/karmada-io/karmada/cmd/karmadactl" +GOPATH=$(go env GOPATH | awk -F ':' '{print $1}') +KARMADACTL_BIN="${GOPATH}/bin/karmadactl" + +#step3. wait until the host cluster ready +echo "Waiting for the host clusters to be ready..." +util::check_clusters_ready "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" + +# load components images to host cluster +kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-descheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}" +kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}" + +#step4. deploy karmada-operator +"${REPO_ROOT}"/hack/deploy-karmada-operator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" + +#step5. install karmada instance by karmada-operator +# prepare the local crds +echo "Prepare the local crds" +cd ${REPO_ROOT}/charts/karmada/ +cp -r _crds crds +tar -zcvf ../../crds.tar.gz crds +cd - + +# copy the local crds.tar.gz file to the specified path of the karmada-operator, so that the karmada-operator will skip the step of downloading CRDs. +CRDTARBALL_URL="local" +DATA_DIR="/var/lib/karmada" +CRD_CACHE_DIR=$(getCrdsDir "${DATA_DIR}" "${CRDTARBALL_URL}") +OPERATOR_POD_NAME=$(kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" get pods -n ${KARMADA_SYSTEM_NAMESPACE} -l karmada-app=karmada-operator -o custom-columns=NAME:.metadata.name --no-headers) +kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" exec -i ${OPERATOR_POD_NAME} -n ${KARMADA_SYSTEM_NAMESPACE} -- mkdir -p ${CRD_CACHE_DIR} +kubectl --kubeconfig="${MAIN_KUBECONFIG}" --context="${HOST_CLUSTER_NAME}" cp ${REPO_ROOT}/crds.tar.gz ${KARMADA_SYSTEM_NAMESPACE}/${OPERATOR_POD_NAME}:${CRD_CACHE_DIR} + +# install karmada instance +"${REPO_ROOT}"/hack/deploy-karmada-by-operator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${VERSION}" true "${CRDTARBALL_URL}" + +#step6. join member clusters +# wait until member clusters ready +util::check_clusters_ready "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}" +util::check_clusters_ready "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}" +# connecting networks between karmada-host, member1 and member2 clusters +echo "connecting cluster networks..." 
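+# util::add_routes is invoked pairwise and in both directions below; roughly,
+# each call installs static routes on the nodes of the first-named cluster
+# toward the pod network of the cluster reachable via the given kubeconfig
+# and context, so the kind clusters can talk to each other's pods directly.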
+util::add_routes "${MEMBER_CLUSTER_1_NAME}" "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}"
+util::add_routes "${MEMBER_CLUSTER_2_NAME}" "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}"
+
+util::add_routes "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}"
+util::add_routes "${MEMBER_CLUSTER_1_NAME}" "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}"
+
+util::add_routes "${HOST_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}"
+util::add_routes "${MEMBER_CLUSTER_2_NAME}" "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}"
+echo "cluster networks connected"
+
+# join push mode member clusters
+export KUBECONFIG="${MAIN_KUBECONFIG}"
+${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" ${MEMBER_CLUSTER_1_NAME} --cluster-kubeconfig="${MEMBER_CLUSTER_1_TMP_CONFIG}" --cluster-context="${MEMBER_CLUSTER_1_NAME}"
+${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" ${MEMBER_CLUSTER_2_NAME} --cluster-kubeconfig="${MEMBER_CLUSTER_2_TMP_CONFIG}" --cluster-context="${MEMBER_CLUSTER_2_NAME}"
+
+# wait until the pull mode cluster is ready
+util::check_clusters_ready "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"
+kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MODE_CLUSTER_NAME}"
+
+#step7. deploy karmada agent in pull mode member clusters
+"${REPO_ROOT}"/hack/deploy-karmada-agent.sh "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"
+
+#step8. deploy metrics-server in member clusters
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_1_TMP_CONFIG}" "${MEMBER_CLUSTER_1_NAME}"
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${MEMBER_CLUSTER_2_TMP_CONFIG}" "${MEMBER_CLUSTER_2_NAME}"
+"${REPO_ROOT}"/hack/deploy-k8s-metrics-server.sh "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"
+
+# wait until all member clusters (member1, member2 and member3) are ready
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_1_NAME}"
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${MEMBER_CLUSTER_2_NAME}"
+util:wait_cluster_ready "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_NAME}"
+
+#step9. merge temporary kubeconfigs of member clusters by kubectl
+export KUBECONFIG=$(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX} | tr '\n' ':')
+kubectl config view --flatten > ${MEMBER_CLUSTER_KUBECONFIG}
+rm $(find ${KUBECONFIG_PATH} -maxdepth 1 -type f | grep ${MEMBER_TMP_CONFIG_PREFIX})
+
+function print_success() {
+  echo -e "$KARMADA_GREETING"
+  echo "Local Karmada is running."
+  echo -e "\nTo start using your karmada, run:"
+  echo -e "  export KUBECONFIG=${MAIN_KUBECONFIG}"
+  echo "Please use 'kubectl config use-context ${HOST_CLUSTER_NAME}/${KARMADA_APISERVER_CLUSTER_NAME}' to switch between the host and control plane clusters."
+  echo -e "\nTo manage your member clusters, run:"
+  echo -e "  export KUBECONFIG=${MEMBER_CLUSTER_KUBECONFIG}"
+  echo "Please use 'kubectl config use-context ${MEMBER_CLUSTER_1_NAME}/${MEMBER_CLUSTER_2_NAME}/${PULL_MODE_CLUSTER_NAME}' to switch between the member clusters."
+} + +print_success diff --git a/hack/local-up-karmada.sh b/hack/local-up-karmada.sh index 17336e6e7c53..5b6521bd2709 100755 --- a/hack/local-up-karmada.sh +++ b/hack/local-up-karmada.sh @@ -61,6 +61,7 @@ MEMBER_CLUSTER_1_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${ME MEMBER_CLUSTER_2_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${MEMBER_CLUSTER_2_NAME}.config" PULL_MODE_CLUSTER_TMP_CONFIG="${KUBECONFIG_PATH}/${MEMBER_TMP_CONFIG_PREFIX}-${PULL_MODE_CLUSTER_NAME}.config" HOST_IPADDRESS=${1:-} +BUILD_FROM_SOURCE=${BUILD_FROM_SOURCE:-"true"} CLUSTER_VERSION=${CLUSTER_VERSION:-"${DEFAULT_CLUSTER_VERSION}"} KIND_LOG_FILE=${KIND_LOG_FILE:-"/tmp/karmada"} @@ -79,7 +80,7 @@ util::verify_go_version util::verify_docker # install kind and kubectl -kind_version=v0.22.0 +kind_version=v0.24.0 echo -n "Preparing: 'kind' existence check - " if util::cmd_exist kind; then echo "passed" @@ -133,11 +134,13 @@ util::create_cluster "${PULL_MODE_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG #step2. make images and get karmadactl export VERSION="latest" export REGISTRY="docker.io/karmada" -export KARMADA_IMAGE_LABEL_VALUE="May_be_pruned_in_local-up-karmada.sh" -export DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS:-} --label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" -make images GOOS="linux" --directory="${REPO_ROOT}" -#clean up dangling images -docker image prune --force --filter "label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + export KARMADA_IMAGE_LABEL_VALUE="May_be_pruned_in_local-up-karmada.sh" + export DOCKER_BUILD_ARGS="${DOCKER_BUILD_ARGS:-} --label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" + make images GOOS="linux" --directory="${REPO_ROOT}" + #clean up dangling images + docker image prune --force --filter "label=image.karmada.io=${KARMADA_IMAGE_LABEL_VALUE}" +fi GO111MODULE=on go install "github.com/karmada-io/karmada/cmd/karmadactl" GOPATH=$(go env GOPATH | awk -F ':' '{print $1}') @@ -148,14 +151,16 @@ echo "Waiting for the host clusters to be ready..." util::check_clusters_ready "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" #step4. 
load components images to kind cluster -kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-descheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + kind load docker-image "${REGISTRY}/karmada-controller-manager:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-scheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-descheduler:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-webhook:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-scheduler-estimator:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-aggregated-apiserver:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-search:${VERSION}" --name="${HOST_CLUSTER_NAME}" + kind load docker-image "${REGISTRY}/karmada-metrics-adapter:${VERSION}" --name="${HOST_CLUSTER_NAME}" +fi #step5. install karmada control plane components "${REPO_ROOT}"/hack/deploy-karmada.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" @@ -185,7 +190,9 @@ ${KARMADACTL_BIN} join --karmada-context="${KARMADA_APISERVER_CLUSTER_NAME}" ${M # wait until the pull mode cluster ready util::check_clusters_ready "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}" -kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MODE_CLUSTER_NAME}" +if [[ "${BUILD_FROM_SOURCE}" == "true" ]]; then + kind load docker-image "${REGISTRY}/karmada-agent:${VERSION}" --name="${PULL_MODE_CLUSTER_NAME}" +fi #step7. 
deploy karmada agent in pull mode member clusters
 "${REPO_ROOT}"/hack/deploy-agent-and-estimator.sh "${MAIN_KUBECONFIG}" "${HOST_CLUSTER_NAME}" "${MAIN_KUBECONFIG}" "${KARMADA_APISERVER_CLUSTER_NAME}" "${PULL_MODE_CLUSTER_TMP_CONFIG}" "${PULL_MODE_CLUSTER_NAME}"
diff --git a/hack/scan-image-vuln.sh b/hack/scan-image-vuln.sh
index a8f23174a029..c95cc435f0fc 100755
--- a/hack/scan-image-vuln.sh
+++ b/hack/scan-image-vuln.sh
@@ -78,7 +78,7 @@ if util::cmd_exist trivy ; then
   echo "pass"
 else
   echo "start installing trivy"
-  curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.48.1
+  curl -sfL https://raw.githubusercontent.com/aquasecurity/trivy/main/contrib/install.sh | sh -s -- -b /usr/local/bin v0.56.1
 fi
 
 if [ ${IMAGEREF} ];then
diff --git a/hack/tools/genkarmadactldocs/gen_karmadactl_docs.go b/hack/tools/genkarmadactldocs/gen_karmadactl_docs.go
index 5a4dbce77335..628acf6daeb1 100644
--- a/hack/tools/genkarmadactldocs/gen_karmadactl_docs.go
+++ b/hack/tools/genkarmadactldocs/gen_karmadactl_docs.go
@@ -71,7 +71,7 @@ func GenMarkdownTreeForIndex(cmd *cobra.Command, dir string) error {
 		return err
 	}
 
-	for _, tp := range []string{util.GroupBasic, util.GroupClusterRegistration, util.GroupClusterManagement, util.GroupClusterTroubleshootingAndDebugging, util.GroupAdvancedCommands} {
+	for _, tp := range []string{util.GroupBasic, util.GroupClusterRegistration, util.GroupClusterManagement, util.GroupClusterTroubleshootingAndDebugging, util.GroupAdvancedCommands, util.GroupSettingsCommands, util.GroupOtherCommands} {
 		// write header of type
 		_, err = io.WriteString(f, "## "+tp+"\n\n")
 		if err != nil {
diff --git a/hack/update-crdgen.sh b/hack/update-crdgen.sh
index a29966d7f18c..9b79e077ad8b 100755
--- a/hack/update-crdgen.sh
+++ b/hack/update-crdgen.sh
@@ -19,7 +19,7 @@ set -o nounset
 set -o pipefail
 
 CONTROLLER_GEN_PKG="sigs.k8s.io/controller-tools/cmd/controller-gen"
-CONTROLLER_GEN_VER="v0.14.0"
+CONTROLLER_GEN_VER="v0.16.5"
 
 source hack/util.sh
diff --git a/hack/util.sh b/hack/util.sh
index 19aad8cefb33..efc1c0f1ec1d 100755
--- a/hack/util.sh
+++ b/hack/util.sh
@@ -37,9 +37,9 @@ KARMADA_METRICS_ADAPTER_LABEL="karmada-metrics-adapter"
 
 KARMADA_GO_PACKAGE="github.com/karmada-io/karmada"
 
-MIN_Go_VERSION=go1.22.6
+MIN_Go_VERSION=go1.22.7
 
-DEFAULT_CLUSTER_VERSION="kindest/node:v1.27.3"
+DEFAULT_CLUSTER_VERSION="kindest/node:v1.31.0"
 
 KARMADA_TARGET_SOURCE=(
   karmada-aggregated-apiserver=cmd/aggregated-apiserver
@@ -243,16 +243,28 @@ function util::create_certkey {
 EOF
 }
 
+# util::create_key_pair generates a new public and private key pair.
+function util::create_key_pair {
+    local sudo=$1
+    local dest_dir=$2
+    local name=$3
+    ${sudo} /usr/bin/env bash -e <<EOF
+          The name of the PVC will be `<pod name>-<volume name>` where
+          `<volume name>` is the name from the `PodSpec.Volumes` array
+          entry. Pod validation will reject the pod if the concatenated name
+          is not valid for a PVC (for example, too long).
+
+          An existing PVC with that name that is not owned by the pod
+          will *not* be used for the pod to avoid using an unrelated
+          volume by mistake. Starting the pod is then blocked until
+          the unrelated PVC is removed. If such a pre-created PVC is
+          meant to be used by the pod, the PVC has to updated with an
+          owner reference to the pod once the pod exists. Normally
+          this should not be necessary, but it may be useful when
+          manually reconstructing a broken cluster.
+
+          This field is read-only and no changes will be made by Kubernetes
+          to the PVC after it has been created.
+
+          Required, must not be nil.
+ properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. No other fields are allowed and will be rejected during + validation. + properties: + annotations: + additionalProperties: + type: string + type: object + finalizers: + items: + type: string + type: array + labels: + additionalProperties: + type: string + type: object + name: + type: string + namespace: + type: string + type: object + spec: + description: |- + The specification for the PersistentVolumeClaim. The entire content is + copied unchanged into the PVC that gets created from this + template. The same fields as in a PersistentVolumeClaim + are also valid here. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) + * An existing PVC (PersistentVolumeClaim) + If the provisioner or an external controller can support the specified data source, + it will create a new volume based on the contents of the specified data source. + When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, + and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. + If the namespace is specified, then dataSourceRef will not be copied to dataSource. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. This may be any object from a non-empty API group (non + core object) or a PersistentVolumeClaim object. + When this field is specified, volume binding will only succeed if the type of + the specified object matches some installed volume populator or dynamic + provisioner. + This field will replace the functionality of the dataSource field and as such + if both fields are non-empty, they must have the same value. For backwards + compatibility, when namespace isn't specified in dataSourceRef, + both fields (dataSource and dataSourceRef) will be set to the same + value automatically if one of them is empty and the other is non-empty. + When namespace is specified in dataSourceRef, + dataSource isn't set to the same value and must be empty. + There are three important differences between dataSource and dataSourceRef: + * While dataSource only allows two specific types of objects, dataSourceRef + allows any non-core object, as well as PersistentVolumeClaim objects. + * While dataSource ignores disallowed values (dropping them), dataSourceRef + preserves all values, and generates an error if a disallowed value is + specified. 
+ * While dataSource only allows local objects, dataSourceRef allows objects + in any namespaces. + (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. + (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + properties: + apiGroup: + description: |- + APIGroup is the group for the resource being referenced. + If APIGroup is not specified, the specified Kind must be in the core API group. + For any other third-party types, APIGroup is required. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. + (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. + type: string + required: + - kind + - name + type: object + resources: + description: |- + resources represents the minimum resources the volume should have. + If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements + that are lower than previous value but must still be higher than capacity recorded in the + status field of the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. 
If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: + description: |- + volumeAttributesClassName may be used to set the VolumeAttributesClass used by this claim. + If specified, the CSI driver will create or update the volume with the attributes defined + in the corresponding VolumeAttributesClass. This has a different purpose than storageClassName, + it can be changed after the claim is created. An empty string value means that no VolumeAttributesClass + will be applied to the claim but it's not allowed to reset this field to empty string once it is set. + If unspecified and the PersistentVolumeClaim is unbound, the default VolumeAttributesClass + will be set by the persistentvolume controller if it exists. + If the resource referred to by volumeAttributesClass does not exist, this PersistentVolumeClaim will be + set to a Pending state, as reflected by the modifyVolumeStatus field, until such as a resource + exists. + More info: https://kubernetes.io/docs/concepts/storage/volume-attributes-classes/ + (Alpha) Using this field requires the VolumeAttributesClass feature gate to be enabled. + type: string + volumeMode: + description: |- + volumeMode defines what type of volume is required by the claim. + Value of Filesystem is implied when not included in claim spec. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: |- + wwids Optional: FC volume world wide identifiers (wwids) + Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. 
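The `dataSource`/`dataSourceRef` semantics documented earlier in this claim schema are easiest to read from a populated claim. A minimal sketch, assuming a hypothetical `VolumeSnapshot` named `data-snap` and a `standard` storage class whose provisioner supports snapshot restore:

```yaml
# Illustrative sketch only; the snapshot and storage class names are assumptions.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: restored-data
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: standard
  resources:
    requests:
      storage: 1Gi
  dataSourceRef:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: data-snap  # the provisioner pre-populates the new volume from this snapshot
```

Because `dataSourceRef` here sets no `namespace`, the API server mirrors the reference into `dataSource` for backward compatibility, exactly as the field descriptions above spell out.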
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugin scripts. This may be + empty if no secret object is specified. If the secret object + contains more than one secret, all secrets are passed to the plugin + scripts. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. This depends on the Flocker + control service being running + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as deprecated + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + properties: + fsType: + description: |- + fsType is filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + Examples: For volume /dev/sda1, you specify the partition as "1". + Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. 
+ More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + into the Pod's container. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + git repository. Otherwise, if specified, the volume will contain the git repository in + the subdirectory with the given name. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s.io/volumes/glusterfs/README.md + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. This is generally + used for system agents or other privileged things that are allowed + to see the host machine. Most containers will NOT need this. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + More info: https://examples.k8s.io/volumes/iscsi/README.md + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + type: string + initiatorName: + description: |- + initiatorName is the custom iSCSI Initiator Name. 
+                If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+                <target portal>:<volume name> will be created for the connection.
+              type: string
+            iqn:
+              description: iqn is the target iSCSI Qualified Name.
+              type: string
+            iscsiInterface:
+              description: |-
+                iscsiInterface is the interface Name that uses an iSCSI transport.
+                Defaults to 'default' (tcp).
+              type: string
+            lun:
+              description: lun represents iSCSI Target Lun number.
+              format: int32
+              type: integer
+            portals:
+              description: |-
+                portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+                is other than default (typically TCP ports 860 and 3260).
+              items:
+                type: string
+              type: array
+              x-kubernetes-list-type: atomic
+            readOnly:
+              description: |-
+                readOnly here will force the ReadOnly setting in VolumeMounts.
+                Defaults to false.
+              type: boolean
+            secretRef:
+              description: secretRef is the CHAP Secret for iSCSI
+                target and initiator authentication
+              properties:
+                name:
+                  default: ""
+                  description: |-
+                    Name of the referent.
+                    This field is effectively required, but due to backwards compatibility is
+                    allowed to be empty. Instances of this type with an empty value here are
+                    almost certainly wrong.
+                    More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                  type: string
+              type: object
+              x-kubernetes-map-type: atomic
+            targetPortal:
+              description: |-
+                targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+                is other than default (typically TCP ports 860 and 3260).
+              type: string
+          required:
+          - iqn
+          - lun
+          - targetPortal
+          type: object
+        name:
+          description: |-
+            name of the volume.
+            Must be a DNS_LABEL and unique within the pod.
+            More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+          type: string
+        nfs:
+          description: |-
+            nfs represents an NFS mount on the host that shares a pod's lifetime
+            More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+          properties:
+            path:
+              description: |-
+                path that is exported by the NFS server.
+                More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+              type: string
+            readOnly:
+              description: |-
+                readOnly here will force the NFS export to be mounted with read-only permissions.
+                Defaults to false.
+                More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+              type: boolean
+            server:
+              description: |-
+                server is the hostname or IP address of the NFS server.
+                More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
+              type: string
+          required:
+          - path
+          - server
+          type: object
+        persistentVolumeClaim:
+          description: |-
+            persistentVolumeClaimVolumeSource represents a reference to a
+            PersistentVolumeClaim in the same namespace.
+            More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+          properties:
+            claimName:
+              description: |-
+                claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+                More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+              type: string
+            readOnly:
+              description: |-
+                readOnly Will force the ReadOnly setting in VolumeMounts.
+                Default false.
+              type: boolean
+          required:
+          - claimName
+          type: object
+        photonPersistentDisk:
+          description: photonPersistentDisk represents a PhotonController
+            persistent disk attached and mounted on kubelets host
+            machine
+          properties:
+            fsType:
+              description: |-
+                fsType is the filesystem type to mount.
+ Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: |- + defaultMode are the mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: |- + ClusterTrustBundle allows a pod to access the `.spec.trustBundle` field + of ClusterTrustBundle objects in an auto-updating file. + + Alpha, gated by the ClusterTrustBundleProjection feature gate. + + ClusterTrustBundle objects can either be selected by name, or by the + combination of signer name and a label selector. + + Kubelet performs aggressive normalization of the PEM contents written + into the pod filesystem. Esoteric PEM features such as inter-block + comments and block headers are stripped. Certificates are deduplicated. + The ordering of certificates within the file is arbitrary, and Kubelet + may change the order over time. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. Mutually-exclusive with name. If unset, + interpreted as "match nothing". If set but empty, interpreted as "match + everything". + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. 
This array is replaced during a strategic + merge patch. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: |- + matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + map is equivalent to an element of matchExpressions, whose key field is "key", the + operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. If using name, then the named ClusterTrustBundle is + allowed not to exist. If using signerName, then the combination of + signerName and labelSelector is allowed to match zero + ClusterTrustBundles. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. The contents of all selected + ClusterTrustBundles will be unified and deduplicated. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the ConfigMap, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path. Must + be utf-8 encoded. The first item + of the relative path must not + start with ''..''' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. 
+ Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: |- + audience is the intended audience of the token. A recipient of a token + must identify itself with an identifier specified in the audience of the + token, and otherwise should reject the token. The audience defaults to the + identifier of the apiserver. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. As the token approaches expiration, the kubelet volume + plugin will proactively rotate the service account token. The kubelet will + start trying to rotate the token if the token is older than 80 percent of + its time to live or if the token is older than 24 hours.Defaults to 1 hour + and must be at least 10 minutes. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple entries are separated with commas) + which acts as the central registry for volumes + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by the plugin + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. + type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
+ More info: https://examples.k8s.io/volumes/rbd/README.md + properties: + fsType: + description: |- + fsType is the filesystem type of the volume that you want to mount. + Tip: Ensure that the filesystem type is supported by the host operating system. + Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". + Default is "xfs". + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. If this is not provided, Login operation will fail. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: |- + storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + Default is ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. + type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + properties: + defaultMode: + description: |- + defaultMode is Optional: mode bits used to set permissions on created files by default. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values + for mode bits. Defaults to 0644. + Directories within the path are not affected by this setting. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume as a file whose name is the + key and content is the value. If specified, the listed keys will be + projected into the specified paths, and unlisted keys will not be + present. If a key is specified which is not present in the Secret, + the volume setup will error unless it is marked optional. Paths must be + relative and may not contain the '..' path or start with '..'. + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: |- + mode is Optional: mode bits used to set permissions on this file. + Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. + YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. + If not specified, the volume defaultMode will be used. + This might be in conflict with other options that affect the file + mode, like fsGroup, and the result can be other mode bits set. + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + May not contain the path element '..'. + May not start with the string '..'. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. 
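The secret volume source documented just above can be exercised with a short manifest. A minimal sketch; the pod, Secret, and key names are assumptions:

```yaml
# Illustrative sketch only; all names are assumptions.
apiVersion: v1
kind: Pod
metadata:
  name: secret-demo
spec:
  containers:
    - name: app
      image: registry.example.com/app:latest  # hypothetical image
      volumeMounts:
        - name: creds
          mountPath: /etc/creds
          readOnly: true
  volumes:
    - name: creds
      secret:
        secretName: app-credentials
        defaultMode: 0400  # octal in YAML; JSON clients must send the decimal 256
        items:
          - key: password
            path: db/password  # projected as /etc/creds/db/password
```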
+ properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. If not specified, default values will be attempted. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: |- + volumeNamespace specifies the scope of the volume within StorageOS. If no + namespace is specified then the Pod's namespace will be used. This allows the + Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + Set VolumeName to any name to override the default behaviour. + Set to "default" if you are not using namespaces within StorageOS. + Namespaces that do not pre-exist within StorageOS will be created. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array featureGates: additionalProperties: type: boolean @@ -592,11 +2429,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -651,9 +2486,15 @@ spec: Defaults to "10.96.0.0/12". type: string serviceType: - description: |- - ServiceType represents the service type of karmada apiserver. - it is ClusterIP by default. + default: ClusterIP + description: |- + ServiceType represents the service type of Karmada API server. + Valid options are: "ClusterIP", "NodePort", "LoadBalancer". + Defaults to "ClusterIP". + enum: + - ClusterIP + - NodePort + - LoadBalancer type: string type: object karmadaAggregatedAPIServer: @@ -683,14 +2524,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). 
- Note: This is a temporary solution to allow for the configuration of the karmada-aggregated-apiserver component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-aggregated-apiserver for details. @@ -744,11 +2583,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -811,7 +2648,6 @@ spec: 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. - All controllers: binding, cluster, clusterStatus, endpointSlice, execution, federatedResourceQuotaStatus, federatedResourceQuotaSync, hpa, namespace, serviceExport, serviceImport, unifiedAuth, workStatus. @@ -830,14 +2666,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-controller-manager component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-controller-manager for details. @@ -894,11 +2728,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -963,14 +2795,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-descheduler component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-descheduler for details. @@ -1016,11 +2846,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1085,14 +2913,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). 
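The flag-override map carried by each component spec (documented repeatedly above) is keyed by the flag name without its leading dashes. A minimal sketch, assuming the map is the component's `extraArgs` field as in the operator's component API, and that `--v` is a valid karmada-controller-manager flag:

```yaml
# Illustrative sketch only; the extraArgs field name and flag value are assumptions.
apiVersion: operator.karmada.io/v1alpha1
kind: Karmada
metadata:
  name: karmada-demo
  namespace: karmada-system
spec:
  components:
    karmadaControllerManager:
      extraArgs:
        v: "5"  # rendered as --v=5 on the component's command line
```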
- Note: This is a temporary solution to allow for the configuration of the karmada-metrics-adapter component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-metrics-adapter for details. @@ -1138,11 +2964,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1207,14 +3031,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-scheduler component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-scheduler for details. @@ -1268,11 +3090,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1337,14 +3157,12 @@ spec: A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-search component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-search for details. @@ -1390,11 +3208,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1459,14 +3275,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the karmada-webhook component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. 
Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://karmada.io/docs/reference/components/karmada-webhook for details. @@ -1512,11 +3326,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1579,7 +3391,6 @@ spec: 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. - All controllers: attachdetach, bootstrapsigner, cloud-node-lifecycle, clusterrole-aggregation, cronjob, csrapproving, csrcleaner, csrsigning, daemonset, deployment, disruption, endpoint, endpointslice, @@ -1594,7 +3405,6 @@ spec: https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/ for details. - However, Karmada uses Kubernetes Native API definitions for federated resource template, so it doesn't need enable some resource related controllers like daemonset, deployment etc. On the other hand, Karmada leverages the capabilities of the Kubernetes controller to @@ -1603,13 +3413,11 @@ spec: and the `garbagecollector` controller handles automatic clean-up of redundant items in your karmada. - According to the user feedback and karmada requirements, the following controllers are enabled by default: namespace, garbagecollector, serviceaccount-token, ttl-after-finished, bootstrapsigner,csrapproving,csrcleaner,csrsigning. See https://karmada.io/docs/administrator/configuration/configure-controllers#kubernetes-controllers - Others are disabled by default. If you want to enable or disable other controllers, you have to explicitly specify all the controllers that kube-controller-manager should enable at startup phase. @@ -1624,14 +3432,12 @@ spec: override. A key in this map is the flag name as it appears on the command line except without leading dash(es). - Note: This is a temporary solution to allow for the configuration of the kube-controller-manager component. In the future, we will provide a more structured way to configure the component. Once that is done, this field will be discouraged to be used. Incorrect settings on this field maybe lead to the corresponding component in an unhealthy state. Before you do it, please confirm that you understand the risks of this configuration. - For supported flags, please see https://kubernetes.io/docs/reference/command-line-tools-reference/kube-controller-manager/ for details. @@ -1684,11 +3490,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -1732,6 +3536,8 @@ spec: type: object type: object type: object + required: + - etcd type: object crdTarball: description: |- @@ -1830,20 +3636,25 @@ spec: status: description: Most recently observed status of the Karmada. properties: + apiServerService: + description: |- + APIServerService reports the location of the Karmada API server service which + can be used by third-party applications to discover the Karmada Service, e.g. + expose the service outside the cluster by Ingress. 
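The new `status.apiServerService` field supports exactly the Ingress scenario its description mentions. A sketch of such an Ingress; the host, the service name, and port 5443 are assumptions rather than values defined by this patch:

```yaml
# Illustrative sketch only; host, service name, and port are assumptions.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: karmada-apiserver
  namespace: karmada-system
  annotations:
    nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"  # the API server serves TLS
spec:
  rules:
    - host: karmada.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: karmada-demo-karmada-apiserver  # taken from .status.apiServerService.name
                port:
                  number: 5443
```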
+ properties: + name: + description: Name represents the name of the Karmada API Server + service. + type: string + required: + - name + type: object conditions: description: Conditions represents the latest available observations of a karmada's current state. items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -1884,12 +3695,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/operator/config/deploy/karmada-operator-clusterrole.yaml b/operator/config/deploy/karmada-operator-clusterrole.yaml new file mode 100644 index 000000000000..ce590e364673 --- /dev/null +++ b/operator/config/deploy/karmada-operator-clusterrole.yaml @@ -0,0 +1,33 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: karmada-operator + labels: + karmada-app: karmada-operator +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] # karmada-operator requires access to the Lease resource for leader election + verbs: ["get", "create", "update"] + - apiGroups: ["operator.karmada.io"] + resources: ["karmadas"] # to manage karmada instances + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["operator.karmada.io"] + resources: ["karmadas/status"] # to update the status subresource of karmada instances + verbs: ["update"] + - apiGroups: [""] + resources: ["events"] # allows karmada-operator to record events in the kubernetes api-server + verbs: ["create"] + - apiGroups: [""] + resources: ["nodes", "pods"] # list cluster nodes and pods to get node information and for health checks + verbs: ["list"] + - apiGroups: [""] + resources: ["namespaces"] # to get information about namespaces, and deploy resources into specific namespaces + verbs: ["get"] + - apiGroups: [""] + resources: ["secrets", "services"] # to manage secrets which might contain sensitive data like credentials and services to expose applications within the cluster + verbs: ["get", "create", "update", "delete"] + - apiGroups: ["apps"] + resources: ["statefulsets", "deployments"] # to manage statefulsets, e.g. etcd, and deployments, e.g. 
karmada-operator + verbs: ["get", "create", "update", "delete"] + - nonResourceURLs: ["/healthz"] # used to check whether the karmada apiserver is healthy + verbs: ["get"] diff --git a/operator/config/deploy/karmada-operator-clusterrolebinding.yaml b/operator/config/deploy/karmada-operator-clusterrolebinding.yaml new file mode 100644 index 000000000000..986a6295b676 --- /dev/null +++ b/operator/config/deploy/karmada-operator-clusterrolebinding.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: karmada-operator + labels: + karmada-app: karmada-operator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: karmada-operator +subjects: + - kind: ServiceAccount + name: karmada-operator + namespace: karmada-system diff --git a/operator/config/deploy/karmada-operator.yaml b/operator/config/deploy/karmada-operator-deployment.yaml similarity index 69% rename from operator/config/deploy/karmada-operator.yaml rename to operator/config/deploy/karmada-operator-deployment.yaml index 2f4a5c8f09f8..b446babf7b4b 100644 --- a/operator/config/deploy/karmada-operator.yaml +++ b/operator/config/deploy/karmada-operator-deployment.yaml @@ -21,13 +21,10 @@ spec: imagePullPolicy: IfNotPresent command: - /bin/karmada-operator - - --kubeconfig=/etc/config + - --leader-elect-resource-namespace=karmada-system - --v=4 - volumeMounts: - - name: kubeconfig - mountPath: /etc/config - subPath: config - volumes: - - name: kubeconfig - secret: - secretName: my-kubeconfig + ports: + - containerPort: 8080 + name: metrics + protocol: TCP + serviceAccountName: karmada-operator diff --git a/operator/config/deploy/karmada-operator-serviceaccount.yaml b/operator/config/deploy/karmada-operator-serviceaccount.yaml new file mode 100644 index 000000000000..7b7caa38bccb --- /dev/null +++ b/operator/config/deploy/karmada-operator-serviceaccount.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: karmada-operator + namespace: karmada-system + labels: + karmada-app: karmada-operator diff --git a/operator/config/samples/karmada-sample.yaml b/operator/config/samples/karmada-sample.yaml new file mode 100644 index 000000000000..ce720307460c --- /dev/null +++ b/operator/config/samples/karmada-sample.yaml @@ -0,0 +1,60 @@ +# used by hack/deploy-karmada-by-operator.sh to deploy a karmada instance +apiVersion: operator.karmada.io/v1alpha1 +kind: Karmada +metadata: + name: {{karmada_instance_name}} + namespace: {{karmada_instance_namespace}} +spec: + hostCluster: + networking: + dnsDomain: cluster.local + crdTarball: {{crd_tarball}} + components: + etcd: + local: + imageRepository: registry.k8s.io/etcd + imageTag: 3.5.13-0 + replicas: 1 + volumeData: + # hostPath: + # type: DirectoryOrCreate + # path: /var/lib/karmada/etcd/karmada-demo + volumeClaim: + metadata: + name: etcd-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 3Gi + karmadaAPIServer: + imageRepository: registry.k8s.io/kube-apiserver + imageTag: v1.30.4 + replicas: 1 + serviceType: NodePort + serviceSubnet: 10.96.0.0/12 + karmadaAggregatedAPIServer: + imageRepository: docker.io/karmada/karmada-aggregated-apiserver + imageTag: {{image_tag}} + replicas: 1 + karmadaControllerManager: + imageRepository: docker.io/karmada/karmada-controller-manager + imageTag: {{image_tag}} + replicas: 1 + karmadaScheduler: + imageRepository: docker.io/karmada/karmada-scheduler + imageTag: {{image_tag}} + replicas: 1 + karmadaWebhook: + imageRepository:
docker.io/karmada/karmada-webhook + imageTag: {{image_tag}} + replicas: 1 + kubeControllerManager: + imageRepository: registry.k8s.io/kube-controller-manager + imageTag: v1.30.4 + replicas: 1 + karmadaMetricsAdapter: + imageRepository: docker.io/karmada/karmada-metrics-adapter + imageTag: {{image_tag}} + replicas: 2 diff --git a/operator/config/samples/karmada.yaml b/operator/config/samples/karmada.yaml index adb1d4db3ad4..94f81d19ab74 100644 --- a/operator/config/samples/karmada.yaml +++ b/operator/config/samples/karmada.yaml @@ -25,41 +25,41 @@ spec: storage: 3Gi karmadaAPIServer: imageRepository: registry.k8s.io/kube-apiserver - imageTag: v1.29.6 + imageTag: v1.30.4 replicas: 1 serviceType: NodePort serviceSubnet: 10.96.0.0/12 karmadaAggregatedAPIServer: imageRepository: docker.io/karmada/karmada-aggregated-apiserver - imageTag: v1.8.0 + imageTag: v1.11.1 replicas: 1 karmadaControllerManager: imageRepository: docker.io/karmada/karmada-controller-manager - imageTag: v1.8.0 + imageTag: v1.11.1 replicas: 1 karmadaScheduler: imageRepository: docker.io/karmada/karmada-scheduler - imageTag: v1.8.0 + imageTag: v1.11.1 replicas: 1 karmadaWebhook: imageRepository: docker.io/karmada/karmada-webhook - imageTag: v1.8.0 + imageTag: v1.11.1 replicas: 1 kubeControllerManager: imageRepository: registry.k8s.io/kube-controller-manager - imageTag: v1.29.6 + imageTag: v1.30.4 replicas: 1 karmadaMetricsAdapter: imageRepository: docker.io/karmada/karmada-metrics-adapter - imageTag: v1.8.0 + imageTag: v1.11.1 replicas: 2 # karmadaSearch: # the component `Karmadasearch` is not installed by default, if you need to install it, uncomment it and note the formatting # imageRepository: docker.io/karmada/karmada-search - # imageTag: v1.8.0 + # imageTag: v1.11.1 # replicas: 1 # karmadaDescheduler: # the component `KarmadaDescheduler` is not installed by default, if you need to install it, uncomment it and note the formatting # imageRepository: docker.io/karmada/karmada-descheduler - # imageTag: v1.8.0 + # imageTag: v1.11.1 # replicas: 1 hostCluster: networking: diff --git a/operator/pkg/apis/operator/v1alpha1/type.go b/operator/pkg/apis/operator/v1alpha1/type.go index dd2ebb518333..bbf0a8776310 100644 --- a/operator/pkg/apis/operator/v1alpha1/type.go +++ b/operator/pkg/apis/operator/v1alpha1/type.go @@ -239,19 +239,32 @@ type VolumeData struct { // operator has no knowledge of where certificate files live, and they must be supplied. type ExternalEtcd struct { // Endpoints of etcd members. Required for ExternalEtcd. + // +required Endpoints []string `json:"endpoints"` // CAData is an SSL Certificate Authority file used to secure etcd communication. // Required if using a TLS connection. - CAData []byte `json:"caData"` + // Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials. + CAData []byte `json:"caData,omitempty"` // CertData is an SSL certification file used to secure etcd communication. // Required if using a TLS connection. - CertData []byte `json:"certData"` + // Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials. + CertData []byte `json:"certData,omitempty"` // KeyData is an SSL key file used to secure etcd communication. // Required if using a TLS connection. - KeyData []byte `json:"keyData"` + // Deprecated: This field is deprecated and will be removed in a future version. Use SecretRef for providing client connection credentials. 
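Since the deprecated inline fields give way to the SecretRef field introduced just below, a sketch of the referenced Secret may help. The instance name and namespace here are assumptions; the data keys follow the contract spelled out in the SecretRef documentation (ca.crt, tls.crt, tls.key, mirrored by the CaCertDataKey/TLSCertDataKey/TLSPrivateKeyDataKey constants added later in this diff). Note that the controller's validateKarmada (also later in this diff) rejects the object unless the secret name matches the name derived via util.EtcdCertSecretName.

```yaml
# Hypothetical credentials Secret for an external etcd cluster.
apiVersion: v1
kind: Secret
metadata:
  name: karmada-demo-etcd-cert   # assumed: must match util.EtcdCertSecretName(<instance name>)
  namespace: test                # assumed namespace of the Karmada object
type: Opaque
data:
  ca.crt: <base64-encoded CA certificate>
  tls.crt: <base64-encoded etcd client certificate>
  tls.key: <base64-encoded etcd client private key>
```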
+ KeyData []byte `json:"keyData,omitempty"` + + // SecretRef references a Kubernetes secret containing the etcd connection credentials. + // The secret must contain the following data keys: + // ca.crt: The Certificate Authority (CA) certificate data. + // tls.crt: The TLS certificate data used for verifying the etcd server's certificate. + // tls.key: The TLS private key. + // Required to configure the connection to an external etcd cluster. + // +required + SecretRef LocalSecretReference `json:"secretRef"` } // KarmadaAPIServer holds settings to kube-apiserver component of the kubernetes. @@ -264,8 +277,12 @@ type KarmadaAPIServer struct { // +optional ServiceSubnet *string `json:"serviceSubnet,omitempty"` - // ServiceType represents the service type of karmada apiserver. - // it is ClusterIP by default. + // ServiceType represents the service type of Karmada API server. + // Valid options are: "ClusterIP", "NodePort", "LoadBalancer". + // Defaults to "ClusterIP". + // + // +kubebuilder:default="ClusterIP" + // +kubebuilder:validation:Enum=ClusterIP;NodePort;LoadBalancer // +optional ServiceType corev1.ServiceType `json:"serviceType,omitempty"` @@ -290,6 +307,24 @@ type KarmadaAPIServer struct { // +optional ExtraArgs map[string]string `json:"extraArgs,omitempty"` + // ExtraVolumes specifies a list of extra volumes for the API server's pod + // To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance, + // the operator will automatically attach volumes for the API server pod needed to configure things such as TLS, + // SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability, + // there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumes, in conjunction + // with ExtraArgs and ExtraVolumeMounts can be used to fulfil those use cases. + // +optional + ExtraVolumes []corev1.Volume `json:"extraVolumes,omitempty"` + + // ExtraVolumeMounts specifies a list of extra volume mounts to be mounted into the API server's container + // To fulfil the base functionality required for a functioning control plane, when provisioning a new Karmada instance, + // the operator will automatically mount volumes into the API server container needed to configure things such as TLS, + // SA token issuance/signing and secured connection to etcd, amongst others. However, given the wealth of options for configurability, + // there are additional features (e.g., encryption at rest and custom AuthN webhook) that can be configured. ExtraVolumeMounts, in conjunction + // with ExtraArgs and ExtraVolumes can be used to fulfil those use cases. + // +optional + ExtraVolumeMounts []corev1.VolumeMount `json:"extraVolumeMounts,omitempty"` + // CertSANs sets extra Subject Alternative Names for the API Server signing cert. // +optional CertSANs []string `json:"certSANs,omitempty"` @@ -659,6 +694,21 @@ type KarmadaStatus struct { // Conditions represents the latest available observations of a karmada's current state. // +optional Conditions []metav1.Condition `json:"conditions,omitempty"` + + // APIServerService reports the location of the Karmada API server service which + // can be used by third-party applications to discover the Karmada Service, e.g. + // expose the service outside the cluster by Ingress. 
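Stepping back to the ExtraVolumes/ExtraVolumeMounts fields added above: the encryption-at-rest use case their comments mention could look roughly like the sketch below. The secret, volume, and path names are invented for illustration; only --encryption-provider-config is a standard kube-apiserver flag, and keys in extraArgs are flag names without leading dashes.

```yaml
# Hypothetical KarmadaAPIServer snippet enabling encryption at rest.
karmadaAPIServer:
  extraArgs:
    encryption-provider-config: /etc/karmada/encryption/config.yaml
  extraVolumes:
    - name: encryption-config              # invented name
      secret:
        secretName: karmada-encryption-config
  extraVolumeMounts:
    - name: encryption-config
      mountPath: /etc/karmada/encryption
      readOnly: true
```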
+ // +optional + APIServerService *APIServerService `json:"apiServerService,omitempty"` +} + +// APIServerService tells the location of Karmada API server service. +// Currently, it only includes the name of the service. The namespace +// of the service is the same as the namespace of the current Karmada object. +type APIServerService struct { + // Name represents the name of the Karmada API Server service. + // +required + Name string `json:"name"` } // LocalSecretReference is a reference to a secret within the enclosing diff --git a/operator/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go b/operator/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go index 465f0cf1a622..c57aca85c33c 100644 --- a/operator/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go +++ b/operator/pkg/apis/operator/v1alpha1/zz_generated.deepcopy.go @@ -22,11 +22,27 @@ limitations under the License. package v1alpha1 import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServerService) DeepCopyInto(out *APIServerService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServerService. +func (in *APIServerService) DeepCopy() *APIServerService { + if in == nil { + return nil + } + out := new(APIServerService) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CRDTarball) DeepCopyInto(out *CRDTarball) { *out = *in @@ -139,6 +155,7 @@ func (in *ExternalEtcd) DeepCopyInto(out *ExternalEtcd) { *out = make([]byte, len(*in)) copy(*out, *in) } + out.SecretRef = in.SecretRef return } @@ -277,6 +294,20 @@ func (in *KarmadaAPIServer) DeepCopyInto(out *KarmadaAPIServer) { (*out)[key] = val } } + if in.ExtraVolumes != nil { + in, out := &in.ExtraVolumes, &out.ExtraVolumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ExtraVolumeMounts != nil { + in, out := &in.ExtraVolumeMounts, &out.ExtraVolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } if in.CertSANs != nil { in, out := &in.CertSANs, &out.CertSANs *out = make([]string, len(*in)) @@ -629,11 +660,16 @@ func (in *KarmadaStatus) DeepCopyInto(out *KarmadaStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.APIServerService != nil { + in, out := &in.APIServerService, &out.APIServerService + *out = new(APIServerService) + **out = **in + } return } @@ -781,17 +817,17 @@ func (in *VolumeData) DeepCopyInto(out *VolumeData) { *out = *in if in.VolumeClaim != nil { in, out := &in.VolumeClaim, &out.VolumeClaim - *out = new(corev1.PersistentVolumeClaimTemplate) + *out = new(v1.PersistentVolumeClaimTemplate) (*in).DeepCopyInto(*out) } if in.HostPath != nil { in, out := &in.HostPath, &out.HostPath - *out = new(corev1.HostPathVolumeSource) + *out = new(v1.HostPathVolumeSource) (*in).DeepCopyInto(*out) } if in.EmptyDir != nil { in, out := &in.EmptyDir, &out.EmptyDir - *out = new(corev1.EmptyDirVolumeSource) + *out = 
new(v1.EmptyDirVolumeSource) (*in).DeepCopyInto(*out) } return diff --git a/operator/pkg/certs/certs.go b/operator/pkg/certs/certs.go index d58c4db3415a..30dae79c5a39 100644 --- a/operator/pkg/certs/certs.go +++ b/operator/pkg/certs/certs.go @@ -25,7 +25,6 @@ import ( "crypto/x509" "crypto/x509/pkix" "encoding/pem" - "errors" "fmt" "math" "math/big" @@ -247,14 +246,20 @@ func (cert *KarmadaCert) KeyName() string { return pair + keyExtension } -// GeneratePrivateKey generates cert key with default size if 1024. it supports -// ECDSA and RAS algorithm. +// GeneratePrivateKey generates a certificate key. It supports both +// ECDSA (using the P-256 elliptic curve) and RSA algorithms. For RSA, +// the key is generated with a size of 3072 bits. If the keyType is +// x509.UnknownPublicKeyAlgorithm, the function defaults to generating +// an RSA key. func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { - if keyType == x509.ECDSA { + switch keyType { + case x509.ECDSA: return ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + case x509.RSA, x509.UnknownPublicKeyAlgorithm: + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) + default: + return nil, fmt.Errorf("unsupported key type: %T, supported key types are RSA and ECDSA", keyType) } - - return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) } // NewCertificateAuthority creates new certificate and private key for the certificate authority @@ -428,7 +433,7 @@ func ParsePrivateKeyPEM(keyData []byte) (crypto.Signer, error) { case *ecdsa.PrivateKey: key = k default: - return nil, errors.New("the private key is neither in RSA nor ECDSA format") + return nil, fmt.Errorf("the private key is in an unsupported format: %s, supported formats are RSA and ECDSA", caPrivateKey) } return key, nil @@ -455,7 +460,7 @@ func etcdServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, IPs: []net.IP{net.IPv4(127, 0, 0, 1)}, } - if cfg.Components.Etcd.Local != nil { + if cfg.Components != nil && cfg.Components.Etcd != nil && cfg.Components.Etcd.Local != nil { appendSANsToAltNames(altNames, cfg.Components.Etcd.Local.ServerCertSANs) } @@ -488,7 +493,7 @@ func apiServerAltNamesMutator(cfg *AltNamesMutatorConfig) (*certutil.AltNames, e fmt.Sprintf("*.%s.svc", cfg.Namespace)}) } - if len(cfg.Components.KarmadaAPIServer.CertSANs) > 0 { + if cfg.Components != nil && cfg.Components.KarmadaAPIServer != nil && len(cfg.Components.KarmadaAPIServer.CertSANs) > 0 { appendSANsToAltNames(altNames, cfg.Components.KarmadaAPIServer.CertSANs) } if len(cfg.ControlplaneAddress) > 0 { diff --git a/operator/pkg/certs/certs_test.go b/operator/pkg/certs/certs_test.go new file mode 100644 index 000000000000..c0eb1dd5f011 --- /dev/null +++ b/operator/pkg/certs/certs_test.go @@ -0,0 +1,807 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package certs + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "net" + "reflect" + "testing" + "time" + + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/keyutil" + + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +var ( + expectedAPIServerAltDNSNames = []string{ + "localhost", + "kubernetes", + "kubernetes.default", + "kubernetes.default.svc", + fmt.Sprintf("*.%s.svc.cluster.local", constants.KarmadaSystemNamespace), + fmt.Sprintf("*.%s.svc", constants.KarmadaSystemNamespace), + } + expectedAPIServerAltIPs = []net.IP{net.IPv4(127, 0, 0, 1)} +) + +func TestCertConfig_defaultPublicKeyAlgorithm(t *testing.T) { + c := &CertConfig{} + c.defaultPublicKeyAlgorithm() + + if c.PublicKeyAlgorithm != x509.RSA { + t.Errorf("expected PublicKeyAlgorithm to be RSA, got %v", c.PublicKeyAlgorithm) + } +} + +func TestCertConfig_defaultNotAfter(t *testing.T) { + c := &CertConfig{} + c.defaultNotAfter() + + if c.NotAfter == nil { + t.Error("expected NotAfter to be set, but it was nil") + } + + if !c.NotAfter.After(time.Now()) { + t.Errorf("expected NotAfter to be a future time, got %v", c.NotAfter) + } + + expectedTime := time.Now().Add(constants.CertificateValidity).UTC() + if c.NotAfter.Sub(expectedTime) > time.Minute { + t.Errorf("NotAfter time is too far from expected, got %v, expected %v", c.NotAfter, expectedTime) + } +} + +func TestKarmadaCertRootCA(t *testing.T) { + certConfig := KarmadaCertRootCA() + + expectedCommonName := "karmada" + + if certConfig.Name != constants.CaCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.CaCertAndKeyName, certConfig.Name) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } +} + +func TestKarmadaCertAdmin(t *testing.T) { + certConfig := KarmadaCertAdmin() + + err := certConfig.AltNamesMutatorFunc(&AltNamesMutatorConfig{}, certConfig) + if err != nil { + t.Fatalf("AltNamesMutatorFunc() returned error: %v", err) + } + + expectedCommonName := "system:admin" + expectedOrganization := []string{"system:masters"} + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} + + if certConfig.Name != constants.KarmadaCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.KarmadaCertAndKeyName, certConfig.Name) + } + + if certConfig.CAName != constants.CaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.CaCertAndKeyName, certConfig.CAName) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Organization, expectedOrganization) { + t.Errorf("expected Organization to contain %v, got %v", expectedOrganization, certConfig.Config.Organization) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageServerAuth and ExtKeyUsageClientAuth for mutual TLS, got %v", certConfig.Config.Usages) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.DNSNames, expectedAPIServerAltDNSNames) { + t.Errorf("expected DNSNames to contain %v, got %v", expectedAPIServerAltDNSNames, 
certConfig.Config.AltNames.DNSNames) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.IPs, expectedAPIServerAltIPs) { + t.Errorf("expected IPs to contain %v, got %v", expectedAPIServerAltIPs, certConfig.Config.AltNames.IPs) + } +} + +func TestKarmadaCertApiserver(t *testing.T) { + certConfig := KarmadaCertApiserver() + + err := certConfig.AltNamesMutatorFunc(&AltNamesMutatorConfig{}, certConfig) + if err != nil { + t.Fatalf("AltNamesMutatorFunc() returned error: %v", err) + } + + expectedCommonName := "karmada-apiserver" + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + + if certConfig.Name != constants.ApiserverCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.ApiserverCertAndKeyName, certConfig.Name) + } + + if certConfig.CAName != constants.CaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.CaCertAndKeyName, certConfig.CAName) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageServerAuth, got %v", certConfig.Config.Usages) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.DNSNames, expectedAPIServerAltDNSNames) { + t.Errorf("expected DNSNames to contain %v, got %v", expectedAPIServerAltDNSNames, certConfig.Config.AltNames.DNSNames) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.IPs, expectedAPIServerAltIPs) { + t.Errorf("expected IPs to contain %v, got %v", expectedAPIServerAltIPs, certConfig.Config.AltNames.IPs) + } +} + +func TestKarmadaCertClient(t *testing.T) { + newControlPlaneAddress := "192.168.1.101" + newCertSAN := "10.0.0.15" + + certConfig := KarmadaCertClient() + + err := certConfig.AltNamesMutatorFunc(&AltNamesMutatorConfig{ + ControlplaneAddress: newControlPlaneAddress, + Components: &v1alpha1.KarmadaComponents{ + KarmadaAPIServer: &v1alpha1.KarmadaAPIServer{ + CertSANs: []string{newCertSAN}, + }, + }, + }, certConfig) + if err != nil { + t.Fatalf("AltNamesMutatorFunc() returned error: %v", err) + } + + expectedCertName := "karmada-client" + expectedCommonName := "system:admin" + expectedOrganization := []string{"system:masters"} + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + + newIPs := []net.IP{net.ParseIP(newControlPlaneAddress), net.ParseIP(newCertSAN)} + expectedNewIPs := append(newIPs, expectedAPIServerAltIPs...) 
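+ // The expected SAN set is the default loopback IP plus the control-plane address and the user-supplied CertSAN passed through AltNamesMutatorConfig above.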
+ + if certConfig.Name != expectedCertName { + t.Errorf("expected Name to be %s, got %s", expectedCertName, certConfig.Name) + } + + if certConfig.CAName != constants.CaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.CaCertAndKeyName, certConfig.CAName) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Organization, expectedOrganization) { + t.Errorf("expected Organization to contain %v, got %v", expectedOrganization, certConfig.Config.Organization) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageClientAuth, got %v", certConfig.Config.Usages) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.DNSNames, expectedAPIServerAltDNSNames) { + t.Errorf("expected DNSNames to contain %v, got %v", expectedAPIServerAltDNSNames, certConfig.Config.AltNames.DNSNames) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.IPs, expectedNewIPs) { + t.Errorf("expected IPs to contain %v, got %v", expectedNewIPs, certConfig.Config.AltNames.IPs) + } +} + +func TestKarmadaCertFrontProxyCA(t *testing.T) { + certConfig := KarmadaCertFrontProxyCA() + + expectedCommonName := "front-proxy-ca" + + if certConfig.Name != constants.FrontProxyCaCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.FrontProxyCaCertAndKeyName, certConfig.Name) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } +} + +func TestKarmadaCertFrontProxyClient(t *testing.T) { + certConfig := KarmadaCertFrontProxyClient() + + expectedCommonName := "front-proxy-client" + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth} + + if certConfig.Name != constants.FrontProxyClientCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.FrontProxyClientCertAndKeyName, certConfig.Name) + } + + if certConfig.CAName != constants.FrontProxyCaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.FrontProxyCaCertAndKeyName, certConfig.Name) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageClientAuth, got %v", certConfig.Config.Usages) + } +} + +func TestKarmadaCertEtcdCA(t *testing.T) { + certConfig := KarmadaCertEtcdCA() + + expectedCommonName := "karmada-etcd-ca" + + if certConfig.Name != constants.EtcdCaCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.EtcdCaCertAndKeyName, certConfig.Name) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } +} + +func TestKarmadaCertEtcdServer(t *testing.T) { + certConfig := KarmadaCertEtcdServer() + + cfg := &AltNamesMutatorConfig{Namespace: constants.KarmadaSystemNamespace} + err := certConfig.AltNamesMutatorFunc(cfg, certConfig) + if err != nil { + t.Fatalf("AltNamesMutatorFunc() returned error: %v", err) + } + + expectedCommonName := "karmada-etcd-server" + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} + expectedDNSNames := 
[]string{ + "localhost", + fmt.Sprintf("%s.%s.svc.cluster.local", util.KarmadaEtcdClientName(cfg.Name), cfg.Namespace), + fmt.Sprintf("*.%s.%s.svc.cluster.local", util.KarmadaEtcdName(cfg.Name), cfg.Namespace), + } + expectedIPs := []net.IP{net.IPv4(127, 0, 0, 1)} + + if certConfig.Name != constants.EtcdServerCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.EtcdServerCertAndKeyName, certConfig.Name) + } + + if certConfig.CAName != constants.EtcdCaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.EtcdCaCertAndKeyName, certConfig.CAName) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageServerAuth and ExtKeyUsageClientAuth, got %v", certConfig.Config.Usages) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.DNSNames, expectedDNSNames) { + t.Errorf("expected DNSNames to contain %v, got %v", expectedDNSNames, certConfig.Config.AltNames.DNSNames) + } + + if !util.ContainsAllValues(certConfig.Config.AltNames.IPs, expectedIPs) { + t.Errorf("expected IPs to contain %v, got %v", expectedIPs, certConfig.Config.AltNames.IPs) + } +} + +func TestKarmadaCertEtcdClient(t *testing.T) { + certConfig := KarmadaCertEtcdClient() + + expectedCommonName := "karmada-etcd-client" + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth} + + if certConfig.Name != constants.EtcdClientCertAndKeyName { + t.Errorf("expected Name to be %s, got %s", constants.EtcdClientCertAndKeyName, certConfig.Name) + } + + if certConfig.CAName != constants.EtcdCaCertAndKeyName { + t.Errorf("expected CAName to be %s, got %s", constants.EtcdCaCertAndKeyName, certConfig.CAName) + } + + if certConfig.Config.CommonName != expectedCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCommonName, certConfig.Config.CommonName) + } + + if !util.ContainsAllValues(certConfig.Config.Usages, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageServerAuth and ExtKeyUsageClientAuth, got %v", certConfig.Config.Usages) + } +} + +func TestGeneratePrivateKey_ECDSAKey_ShouldReturnValidECDSAKey(t *testing.T) { + key, err := GeneratePrivateKey(x509.ECDSA) + if err != nil { + t.Fatalf("GeneratePrivateKey() returned error: %v", err) + } + + // Check that the key is of type *ecdsa.PrivateKey. + if _, ok := key.(*ecdsa.PrivateKey); !ok { + t.Errorf("GeneratePrivateKey() returned key of type %T, expected *ecdsa.PrivateKey", key) + } + + // Verify that the elliptic curve is P-256 (secp256r1). + ecdsaKey := key.(*ecdsa.PrivateKey) + if ecdsaKey.Curve != elliptic.P256() { + t.Errorf("GeneratePrivateKey() returned ECDSA key with elliptic curve %v, expected P-256", ecdsaKey.Curve) + } +} + +func TestGeneratePrivateKey_RSAKey_ShouldReturnValidRSAKey(t *testing.T) { + expectedRSAKeySizeInBytes := rsaKeySize / 8 + + key, err := GeneratePrivateKey(x509.RSA) + if err != nil { + t.Fatalf("GeneratePrivateKey() returned error: %v", err) + } + + // Check that the key is of type *rsa.PrivateKey. + if _, ok := key.(*rsa.PrivateKey); !ok { + t.Errorf("GeneratePrivateKey() returned key of type %T, expected *rsa.PrivateKey", key) + } + + // Verify that the key size is correct. 
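+ // rsa.PrivateKey.Size() returns the modulus length in bytes, so a 3072-bit key reports rsaKeySize / 8 = 384.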
+ rsaKey := key.(*rsa.PrivateKey) + if rsaKey.Size() != expectedRSAKeySizeInBytes { + t.Errorf("GeneratePrivateKey() returned RSA key of size %d, expected %d", rsaKey.Size()*8, rsaKeySize) + } +} + +func TestGeneratePrivateKey_InvalidKeyType_ShouldReturnError(t *testing.T) { + _, err := GeneratePrivateKey(x509.DSA) + if err == nil { + t.Error("GeneratePrivateKey() expected error for unsupported key type 'DSA', got nil") + } +} + +func TestEncodeCertPEM_CorrectEncoding(t *testing.T) { + cert := &x509.Certificate{ + Raw: []byte("dummy_certificate_data"), + } + + // Base64 encoding of the dummy certificate data. + base64EncodedCert := base64.StdEncoding.EncodeToString(cert.Raw) + + // Construct the expected PEM format. + expectedPEM := fmt.Sprintf( + "-----BEGIN %s-----\n%s\n-----END %s-----\n", + CertificateBlockType, + base64EncodedCert, + CertificateBlockType, + ) + got := EncodeCertPEM(cert) + + // Check if the encoded PEM matches the expected PEM format. + if !bytes.Equal(got, []byte(expectedPEM)) { + t.Errorf("EncodeCertPEM() = %s; want %s", got, expectedPEM) + } +} + +func TestNewCertificateAuthority(t *testing.T) { + cc := &CertConfig{ + Name: "test-ca", + CAName: "test-ca", + PublicKeyAlgorithm: x509.RSA, + Config: certutil.Config{ + CommonName: "test-ca", + Organization: []string{"test-org"}, + }, + } + + cert, err := NewCertificateAuthority(cc) + if err != nil { + t.Fatalf("NewCertificateAuthority() returned an error: %v", err) + } + + if cert == nil { + t.Fatal("NewCertificateAuthority() returned nil cert") + } + + if cert.pairName != cc.Name { + t.Errorf("expected pairName to be %s, got %s", cc.Name, cert.pairName) + } + + if cert.caName != cc.CAName { + t.Errorf("expected caName to be %s, got %s", cc.CAName, cert.caName) + } + + if cert.cert == nil { + t.Error("expected cert to be non-nil") + } + + if cert.key == nil { + t.Error("expected key to be non-nil") + } + + block, _ := pem.Decode(cert.cert) + if block == nil || block.Type != CertificateBlockType { + t.Errorf("expected PEM block type to be %s, got %v", CertificateBlockType, block) + } +} + +func TestParsePrivateKeyPEM_ValidRSAKey(t *testing.T) { + key, err := GeneratePrivateKey(x509.RSA) + if err != nil { + t.Fatalf("Failed to generate RSA key: %v", err) + } + + keyPEM, err := keyutil.MarshalPrivateKeyToPEM(key) + if err != nil { + t.Errorf("unable to marshal private key to PEM, err: %v", err) + } + + signer, err := ParsePrivateKeyPEM(keyPEM) + if err != nil { + t.Fatalf("ParsePrivateKeyPEM() returned an error: %v", err) + } + + if _, ok := signer.(*rsa.PrivateKey); !ok { + t.Errorf("ParsePrivateKeyPEM() returned key of type %T, expected *rsa.PrivateKey", signer) + } +} + +func TestParsePrivateKeyPEM_InvalidKeyType(t *testing.T) { + invalidKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "INVALID PRIVATE KEY", + Bytes: []byte("invalid"), + }) + + signer, err := ParsePrivateKeyPEM(invalidKeyPEM) + if err == nil { + t.Errorf("ParsePrivateKeyPEM() expected error for unsupported key type, got nil") + } + if signer != nil { + t.Errorf("ParsePrivateKeyPEM() expected nil signer, got %v", signer) + } +} + +func TestCreateCertAndKeyFilesWithCA(t *testing.T) { + caName, caCommonName := "test-ca", "test-ca" + caCertConfig := &CertConfig{ + Name: caName, + CAName: caName, + PublicKeyAlgorithm: x509.RSA, + Config: certutil.Config{ + CommonName: caCommonName, + Organization: []string{"test-org"}, + }, + } + + caCert, err := NewCertificateAuthority(caCertConfig) + if err != nil { + t.Fatalf("NewCertificateAuthority() returned an error: 
%v", err) + } + + certName, certCommonName := "test-cert", "test-common-name" + certConfig := &CertConfig{ + Name: certName, + CAName: caName, + PublicKeyAlgorithm: x509.RSA, + Config: certutil.Config{ + CommonName: certCommonName, + Organization: []string{"test-org"}, + Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + }, + } + + cert, err := CreateCertAndKeyFilesWithCA(certConfig, caCert.CertData(), caCert.KeyData()) + if err != nil { + t.Fatalf("CreateCertAndKeyFilesWithCA() returned an error: %v", err) + } + + if cert == nil { + t.Fatal("CreateCertAndKeyFilesWithCA() returned nil cert") + } + + if cert.cert == nil || cert.key == nil { + t.Error("Expected cert and key to be non-nil") + } + + if cert.pairName != certConfig.Name { + t.Errorf("expected pairName to be %s, got %s", certConfig.Name, cert.pairName) + } + + if cert.caName != certConfig.CAName { + t.Errorf("expected caName to be %s, got %s", certConfig.CAName, cert.caName) + } + + block, _ := pem.Decode(cert.cert) + if block == nil || block.Type != CertificateBlockType { + t.Errorf("expected PEM block type to be %s, got %v", CertificateBlockType, block) + } +} + +func TestNewSignedCert_Success(t *testing.T) { + // Create a CA certificate. + caName, caCommonName := "test-ca", "test-ca" + caCertConfig := &CertConfig{ + Name: caName, + CAName: caName, + PublicKeyAlgorithm: x509.RSA, + Config: certutil.Config{ + CommonName: caCommonName, + Organization: []string{"test-org"}, + }, + } + + caKarmadaCert, err := NewCertificateAuthority(caCertConfig) + if err != nil { + t.Fatalf("NewCertificateAuthority() returned an error: %v", err) + } + + caCerts, err := certutil.ParseCertsPEM(caKarmadaCert.CertData()) + if err != nil { + t.Error(err) + } + caCert := caCerts[0] + + caKey, err := ParsePrivateKeyPEM(caKarmadaCert.key) + if err != nil { + t.Error(err) + } + + // Create a key pair for the certificate. + key, err := GeneratePrivateKey(x509.RSA) + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + + // Create a CertConfig for the test certificate. 
+ expectedCertCommonName := "test-cert" + expectedCertOrganization := []string{"test-org"} + expectedCertDNSNames := []string{"localhost"} + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + certNotAfter := time.Now().Add(constants.CertificateValidity) + cc := &CertConfig{ + Config: certutil.Config{ + CommonName: expectedCertCommonName, + Organization: expectedCertOrganization, + AltNames: certutil.AltNames{DNSNames: expectedCertDNSNames}, + Usages: expectedUsages, + }, + NotAfter: &certNotAfter, + } + + cert, err := NewSignedCert(cc, key, caCert, caKey, false) + if err != nil { + t.Fatalf("NewSignedCert returned an error: %v", err) + } + + if cert.IsCA { + t.Errorf("expected certificate to not be a CA, but it was") + } + + if cert.Subject.CommonName != expectedCertCommonName { + t.Errorf("expected CommonName to be %s, got %s", expectedCertCommonName, cert.Subject.CommonName) + } + + if !util.ContainsAllValues(cert.Subject.Organization, expectedCertOrganization) { + t.Errorf("expected Organization to contain %v, got %v", expectedCertOrganization, cert.Subject.Organization) + } + + if !util.ContainsAllValues(cert.DNSNames, expectedCertDNSNames) { + t.Errorf("expected DNSNames to contain %v, got %v", expectedCertDNSNames, cert.DNSNames) + } + + if !util.ContainsAllValues(cert.ExtKeyUsage, expectedUsages) { + t.Errorf("expected Usages to contain ExtKeyUsageServerAuth, got %v", cert.ExtKeyUsage) + } +} + +func TestNewSignedCert_ErrorOnEmptyCommonName(t *testing.T) { + // Create a key pair for the certificate and CA. + key, err := GeneratePrivateKey(x509.RSA) + if err != nil { + t.Fatalf("failed to generate key: %v", err) + } + + // Create a CertConfig for the test certificate. + expectedCertOrganization := []string{"test-org"} + expectedCertDNSNames := []string{"localhost"} + expectedUsages := []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth} + certNotAfter := time.Now().Add(constants.CertificateValidity) + cc := &CertConfig{ + Config: certutil.Config{ + CommonName: "", + Organization: expectedCertOrganization, + AltNames: certutil.AltNames{DNSNames: expectedCertDNSNames}, + Usages: expectedUsages, + }, + NotAfter: &certNotAfter, + } + + _, err = NewSignedCert(cc, key, &x509.Certificate{}, key, false) + if err == nil { + t.Fatalf("expected error for empty CommonName, but got nil") + } + + expectedErrorMsg := "must specify a CommonName" + if err.Error() != expectedErrorMsg { + t.Errorf("expected error %s, got %v", expectedErrorMsg, err) + } +} + +func TestAppendSANsToAltNames(t *testing.T) { + tests := []struct { + name string + SANs []string + wantIPs []net.IP + wantDNS []string + }{ + { + name: "AppendSANsToAltNames_ValidIPAddress_ShouldReturnCorrectIPs", + SANs: []string{"192.168.1.1"}, + wantIPs: []net.IP{net.ParseIP("192.168.1.1")}, + wantDNS: nil, + }, + { + name: "AppendSANsToAltNames_ValidDNSName_ShouldReturnCorrectDNS", + SANs: []string{"example.com"}, + wantIPs: nil, + wantDNS: []string{"example.com"}, + }, + { + name: "AppendSANsToAltNames_InvalidDNSAndValidIP_ShouldIgnoreInvalidDNSAndReturnCorrectIP", + SANs: []string{"invalid!.com", "10.0.0.1"}, + wantIPs: []net.IP{net.ParseIP("10.0.0.1")}, + wantDNS: nil, + }, + { + name: "AppendSANsToAltNames_ValidWildcardDNS_ShouldReturnCorrectWildcardDNS", + SANs: []string{"*.example.com"}, + wantIPs: nil, + wantDNS: []string{"*.example.com"}, + }, + { + name: "AppendSANsToAltNames_MixedValidIPAndDNS_ShouldReturnCorrectIPAndDNS", + SANs: []string{"example.com", "10.0.0.1"}, + wantIPs: []net.IP{net.ParseIP("10.0.0.1")}, + wantDNS: 
[]string{"example.com"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + altNames := &certutil.AltNames{} + appendSANsToAltNames(altNames, tt.SANs) + + if !reflect.DeepEqual(altNames.IPs, tt.wantIPs) { + t.Errorf("AppendSANsToAltNames() IPs = %+v, want %+v", altNames.IPs, tt.wantIPs) + } + if !reflect.DeepEqual(altNames.DNSNames, tt.wantDNS) { + t.Errorf("AppendSANsToAltNames() DNS = %+v, want %+v", altNames.IPs, tt.wantIPs) + } + }) + } +} + +func TestRemoveDuplicateAltNames(t *testing.T) { + tests := []struct { + name string + input *certutil.AltNames + want *certutil.AltNames + }{ + { + name: "RemoveDuplicateAltNames_DuplicateDNSNames_ShouldReturnUniqueDNSNames", + input: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + want: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + }, + { + name: "RemoveDuplicateAltNames_DuplicateIPs_ShouldReturnUniqueIPs", + input: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.1")}, + }, + want: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1")}, + }, + }, + { + name: "RemoveDuplicateAltNames_DuplicateDNSNamesAndIPs_ShouldReturnUniqueDNSNamesAndIPs", + input: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + want: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + }, + { + name: "RemoveDuplicateAltNames_NoDuplicates_ShouldReturnSameAltNames", + input: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + want: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.org"}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.2")}, + }, + }, + { + name: "RemoveDuplicateAltNames_EmptyAltNames_ShouldReturnEmptyAltNames", + input: &certutil.AltNames{ + DNSNames: []string{}, + IPs: []net.IP{}, + }, + want: &certutil.AltNames{ + DNSNames: []string{}, + IPs: nil, + }, + }, + { + name: "RemoveDuplicateAltNames_NilAltNames_ShouldReturnNil", + input: nil, + want: nil, + }, + { + name: "RemoveDuplicateAltNames_EmptyDNSNamesWithNonEmptyIPs_ShouldReturnUniqueIPs", + input: &certutil.AltNames{ + DNSNames: []string{}, + IPs: []net.IP{net.ParseIP("192.168.1.1"), net.ParseIP("192.168.1.1")}, + }, + want: &certutil.AltNames{ + DNSNames: []string{}, + IPs: []net.IP{net.ParseIP("192.168.1.1")}, + }, + }, + { + name: "RemoveDuplicateAltNames_EmptyIPsWithNonEmptyDNSNames_ShouldReturnUniqueDNSNames", + input: &certutil.AltNames{ + DNSNames: []string{"example.com", "example.com"}, + IPs: []net.IP{}, + }, + want: &certutil.AltNames{ + DNSNames: []string{"example.com"}, + IPs: nil, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RemoveDuplicateAltNames(tt.input) + if !reflect.DeepEqual(tt.input, tt.want) { + t.Errorf("RemoveDuplicateAltNames() = %+v, want %+v", tt.input, tt.want) + } + }) + } +} diff --git a/operator/pkg/certs/store_test.go b/operator/pkg/certs/store_test.go new file 
mode 100644 index 000000000000..552ee246394b --- /dev/null +++ b/operator/pkg/certs/store_test.go @@ -0,0 +1,153 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certs + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" +) + +// Helper function to create a new KarmadaCert with given pairName. +func newKarmadaCert(pairName string, certData, keyData []byte) *KarmadaCert { + return &KarmadaCert{ + pairName: pairName, + cert: certData, + key: keyData, + } +} + +func TestNewCertStore(t *testing.T) { + store := NewCertStore() + if store == nil { + t.Fatalf("expected a non-nil CertStore") + } + if len(store.(*KarmadaCertStore).certs) != 0 { + t.Errorf("expected an empty cert store") + } +} + +func TestAddAndGetCert(t *testing.T) { + store := NewCertStore() + + cert := newKarmadaCert("testCert", []byte("certData"), []byte("keyData")) + store.AddCert(cert) + + retrievedCert := store.GetCert("testCert") + if retrievedCert == nil { + t.Fatalf("expected to retrieve cert but got nil") + } + if string(retrievedCert.cert) != "certData" { + t.Errorf("expected certData but got %s", string(retrievedCert.cert)) + } + if string(retrievedCert.key) != "keyData" { + t.Errorf("expected keyData but got %s", string(retrievedCert.key)) + } +} + +func TestCertList(t *testing.T) { + cert1Name, cert2Name := "cert1", "cert2" + store := NewCertStore() + + cert1 := newKarmadaCert(cert1Name, []byte("cert1Data"), []byte("key1Data")) + cert2 := newKarmadaCert(cert2Name, []byte("cert2Data"), []byte("key2Data")) + store.AddCert(cert1) + store.AddCert(cert2) + + certs := store.CertList() + if len(certs) != 2 { + t.Errorf("expected 2 certs but got %d", len(certs)) + } + if cert1 = store.GetCert(cert1Name); cert1 == nil { + t.Errorf("expected to retrieve %s, but got nil", cert1Name) + } + if cert2 = store.GetCert(cert2Name); cert2 == nil { + t.Errorf("expected to retrieve %s, but got nil", cert2Name) + } +} + +func TestLoadCertFromSecret(t *testing.T) { + store := NewCertStore() + + secret := &corev1.Secret{ + Data: map[string][]byte{ + "cert1.crt": []byte("cert1CertData"), + "cert1.key": []byte("cert1KeyData"), + "cert2.crt": []byte("cert2CertData"), + "cert2.key": []byte("cert2KeyData"), + }, + } + + err := store.LoadCertFromSecret(secret) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + + cert1 := store.GetCert("cert1") + if cert1 == nil || string(cert1.cert) != "cert1CertData" || string(cert1.key) != "cert1KeyData" { + t.Errorf("cert1 content is incorrect expected cert %s key %s, got cert %s key %s", "cert1CertData", "cert1KeyData", string(cert1.cert), string(cert1.key)) + } + + cert2 := store.GetCert("cert2") + if cert2 == nil || string(cert2.cert) != "cert2CertData" || string(cert2.key) != "cert2KeyData" { + t.Errorf("cert2 content is incorrect expected cert %s key %s, got cert %s key %s", "cert2CertData", "cert2KeyData", string(cert2.cert), string(cert2.key)) + } +} + +func TestLoadCertFromSecret_EmptyData(t *testing.T) { + store := NewCertStore() + + secret := 
&corev1.Secret{ + Data: map[string][]byte{}, + } + + err := store.LoadCertFromSecret(secret) + if err == nil { + t.Error("expected error that cert data is empty") + } + if len(store.CertList()) != 0 { + t.Errorf("expected 0 certs but got %d", len(store.CertList())) + } +} + +func TestLoadCertFromSecret_InvalidFormat(t *testing.T) { + pairName := "invalid.data" + + store := NewCertStore() + + secret := &corev1.Secret{ + Data: map[string][]byte{ + pairName: []byte("invalidData"), + }, + } + + err := store.LoadCertFromSecret(secret) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(store.CertList()) != 1 { + t.Errorf("expected 1 cert but got %d", len(store.CertList())) + } + + karmadaCert := store.GetCert(pairName) + if len(karmadaCert.key) != 0 { + t.Errorf("expected the cert data content to be empty but got %v", karmadaCert.cert) + } + if len(karmadaCert.key) != 0 { + t.Errorf("expected the key data content to be empty but got %v", karmadaCert.key) + } +} diff --git a/operator/pkg/constants/constants.go b/operator/pkg/constants/constants.go index 2ded4157f2db..525b27d743fe 100644 --- a/operator/pkg/constants/constants.go +++ b/operator/pkg/constants/constants.go @@ -31,7 +31,7 @@ const ( // EtcdDefaultVersion defines the default of the karmada etcd image tag EtcdDefaultVersion = "3.5.13-0" // KubeDefaultVersion defines the default of the karmada apiserver and kubeControllerManager image tag - KubeDefaultVersion = "v1.29.6" + KubeDefaultVersion = "v1.30.4" // KarmadaDefaultServiceSubnet defines the default of the subnet used by k8s services. KarmadaDefaultServiceSubnet = "10.96.0.0/12" // KarmadaDefaultDNSDomain defines the default of the DNSDomain @@ -77,6 +77,16 @@ const ( KarmadaAPIserverListenClientPort = 5443 // EtcdDataVolumeName defines the name to etcd data volume EtcdDataVolumeName = "etcd-data" + // EtcdClientCredentialsVolumeName defines the name of the volume for the etcd client credentials + EtcdClientCredentialsVolumeName = "etcd-client-cert" // #nosec G101 + // EtcdClientCredentialsMountPath defines the mount path for the etcd client credentials data + EtcdClientCredentialsMountPath = "/etc/karmada/pki/etcd-client" // #nosec G101 + // CaCertDataKey defines the data key for a CA cert + CaCertDataKey = "ca.crt" + // TLSCertDataKey defines the data key for a TLS cert + TLSCertDataKey = "tls.crt" + // TLSPrivateKeyDataKey defines the data key for a TLS cert private key + TLSPrivateKeyDataKey = "tls.key" // CertificateValidity Certificate validity period CertificateValidity = time.Hour * 24 * 365 diff --git a/operator/pkg/controller/karmada/controller.go b/operator/pkg/controller/karmada/controller.go index bd56007acf4d..49870782179e 100644 --- a/operator/pkg/controller/karmada/controller.go +++ b/operator/pkg/controller/karmada/controller.go @@ -18,11 +18,15 @@ package karmada import ( "context" + "fmt" "reflect" "strconv" "time" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" @@ -37,10 +41,11 @@ import ( operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" operatorscheme "github.com/karmada-io/karmada/operator/pkg/scheme" + "github.com/karmada-io/karmada/operator/pkg/util" ) const ( - // ControllerName is the controller name that will be used when reporting events. 
+ // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "karmada-operator-controller" // ControllerFinalizerName is the name of the karmada controller finalizer @@ -48,6 +53,9 @@ const ( // DisableCascadingDeletionLabel is the label that determine whether to perform cascade deletion DisableCascadingDeletionLabel = "operator.karmada.io/disable-cascading-deletion" + + // ValidationErrorReason is the reason for a validation error + ValidationErrorReason = "ValidationError" ) // Controller controls the Karmada resource. @@ -77,6 +85,11 @@ func (ctrl *Controller) Reconcile(ctx context.Context, req controllerruntime.Req return controllerruntime.Result{}, err } + if err := ctrl.validateKarmada(karmada); err != nil { + klog.Error(err, "Validation failed for karmada") + return controllerruntime.Result{}, nil + } + // The object is being deleted if !karmada.DeletionTimestamp.IsZero() { val, ok := karmada.Labels[DisableCascadingDeletionLabel] @@ -96,6 +109,31 @@ func (ctrl *Controller) Reconcile(ctx context.Context, req controllerruntime.Req return controllerruntime.Result{}, ctrl.syncKarmada(karmada) } +// validateKarmada ensures the Karmada resource adheres to validation rules +func (ctrl *Controller) validateKarmada(karmada *operatorv1alpha1.Karmada) error { + if karmada.Spec.Components.Etcd != nil && karmada.Spec.Components.Etcd.External != nil { + expectedSecretName := util.EtcdCertSecretName(karmada.Name) + if karmada.Spec.Components.Etcd.External.SecretRef.Name != expectedSecretName { + errorMessage := fmt.Sprintf("Secret name for external etcd client must be %s, but got %s", expectedSecretName, karmada.Spec.Components.Etcd.External.SecretRef.Name) + ctrl.EventRecorder.Event(karmada, corev1.EventTypeWarning, ValidationErrorReason, errorMessage) + + newCondition := metav1.Condition{ + Type: string(operatorv1alpha1.Ready), + Status: metav1.ConditionFalse, + Reason: ValidationErrorReason, + Message: errorMessage, + LastTransitionTime: metav1.Now(), + } + meta.SetStatusCondition(&karmada.Status.Conditions, newCondition) + if err := ctrl.Status().Update(context.TODO(), karmada); err != nil { + return err + } + return fmt.Errorf(errorMessage) + } + } + return nil +} + func (ctrl *Controller) syncKarmada(karmada *operatorv1alpha1.Karmada) error { klog.V(2).InfoS("Reconciling karmada", "name", karmada.Name) planner, err := NewPlannerFor(karmada, ctrl.Client, ctrl.Config) @@ -147,6 +185,7 @@ func (ctrl *Controller) ensureKarmada(ctx context.Context, karmada *operatorv1al // SetupWithManager creates a controller and register to controller manager. func (ctrl *Controller) SetupWithManager(mgr controllerruntime.Manager) error { return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). 
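+ // Naming the controller explicitly keeps events and metrics reported under a stable identifier instead of one derived from the reconciled type.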
For(&operatorv1alpha1.Karmada{}, builder.WithPredicates(predicate.Funcs{ UpdateFunc: ctrl.onKarmadaUpdate, diff --git a/operator/pkg/controller/karmada/planner.go b/operator/pkg/controller/karmada/planner.go index b3455c322c74..c6418d8331ee 100644 --- a/operator/pkg/controller/karmada/planner.go +++ b/operator/pkg/controller/karmada/planner.go @@ -180,6 +180,9 @@ func (p *Planner) afterRunJob() error { Namespace: p.karmada.GetNamespace(), Name: util.AdminKubeconfigSecretName(p.karmada.GetName()), } + p.karmada.Status.APIServerService = &operatorv1alpha1.APIServerService{ + Name: util.KarmadaAPIServerName(p.karmada.GetName()), + } return p.Client.Status().Update(context.TODO(), p.karmada) } // if it is deInit workflow, the cr will be deleted with karmada is be deleted, so we need not to diff --git a/operator/pkg/controller/karmada/planner_test.go b/operator/pkg/controller/karmada/planner_test.go new file mode 100644 index 000000000000..0f564304cc7c --- /dev/null +++ b/operator/pkg/controller/karmada/planner_test.go @@ -0,0 +1,442 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package karmada + +import ( + "errors" + "fmt" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +func TestNewPlannerFor(t *testing.T) { + name := "karmada-demo" + tests := []struct { + name string + karmada *operatorv1alpha1.Karmada + client client.Client + config *rest.Config + wantAction Action + wantErr bool + }{ + { + name: "NewPlannerFor_WithInitAction_PlannerConstructed", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + client: fake.NewFakeClient(), + config: &rest.Config{}, + wantAction: InitAction, + wantErr: false, + }, + { + name: "NewPlannerFor_WithDeInitAction_PlannerConstructed", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + DeletionTimestamp: &metav1.Time{ + Time: time.Now().Add(-5 * time.Minute), + }, + + Finalizers: []string{ControllerFinalizerName}, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + client: fake.NewFakeClient(), + config: &rest.Config{}, + wantAction: DeInitAction, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + planner, err := NewPlannerFor(test.karmada, test.client, 
test.config) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if planner.action != test.wantAction { + t.Errorf("expected planner action to be %s, but got %s", test.wantAction, planner.action) + } + }) + } +} + +func TestPreRunJob(t *testing.T) { + name, namespace := "karmada-demo", names.NamespaceDefault + tests := []struct { + name string + karmada *operatorv1alpha1.Karmada + config *rest.Config + action Action + verify func(p *Planner, conditionStatus metav1.ConditionStatus, conditionMsg, conditionReason string) error + wantErr bool + }{ + { + name: "PreRunJob_WithInitActionPlanned_PreRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + action: InitAction, + verify: verifyJobInCommon, + wantErr: false, + }, + { + name: "PreRunJob_WithDeInitActionPlanned_PreRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: &metav1.Time{ + Time: time.Now().Add(-5 * time.Minute), + }, + Finalizers: []string{ControllerFinalizerName}, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + action: DeInitAction, + verify: verifyJobInCommon, + wantErr: false, + }, + { + name: "PreRunJob_WithUnknownActionPlanned_PreRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + action: "UnknownAction", + verify: func(planner *Planner, _ metav1.ConditionStatus, _, _ string) error { + // Check the status conditions. 
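+ // An unrecognized action should leave preRunJob as a no-op, so no Progressing condition is expected to be recorded.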
+ conditions := planner.karmada.Status.Conditions + if len(conditions) != 0 { + return fmt.Errorf("expected no conditions, but got %d conditions", len(conditions)) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client, err := prepJobInCommon(test.karmada) + if err != nil { + t.Fatalf("failed to prep before creating planner, got error: %v", err) + } + + planner, err := NewPlannerFor(test.karmada, client, test.config) + if err != nil { + t.Fatalf("failed to create planner, got error: %v", err) + } + planner.action = test.action + + err = planner.preRunJob() + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + + conditionMsg := fmt.Sprintf("karmada %s job is in progressing", strings.ToLower(string(test.action))) + if err := test.verify(planner, metav1.ConditionFalse, conditionMsg, "Progressing"); err != nil { + t.Errorf("failed to verify the pre running job, got error: %v", err) + } + }) + } +} + +func TestAfterRunJob(t *testing.T) { + name, namespace := "karmada-demo", names.NamespaceDefault + tests := []struct { + name string + karmada *operatorv1alpha1.Karmada + config *rest.Config + action Action + verify func(*operatorv1alpha1.Karmada, *Planner, Action) error + wantErr bool + }{ + { + name: "AfterRunJob_WithInitActionPlannedAndHostClusterIsLocal_AfterRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + action: InitAction, + verify: func(karmada *operatorv1alpha1.Karmada, planner *Planner, action Action) error { + secretRefNameExpected := util.AdminKubeconfigSecretName(karmada.GetName()) + if planner.karmada.Status.SecretRef == nil { + return fmt.Errorf("expected SecretRef to be set, but got nil") + } + if planner.karmada.Status.SecretRef.Name != secretRefNameExpected { + return fmt.Errorf("expected SecretRef Name to be %s, but got %s", secretRefNameExpected, planner.karmada.Status.SecretRef.Name) + } + if planner.karmada.Status.SecretRef.Namespace != names.NamespaceDefault { + return fmt.Errorf("expected SecretRef Namespace to be %s, but got %s", names.NamespaceDefault, planner.karmada.Status.SecretRef.Namespace) + } + + conditionMsg := fmt.Sprintf("karmada %s job is completed", strings.ToLower(string(action))) + if err := verifyJobInCommon(planner, metav1.ConditionTrue, conditionMsg, "Completed"); err != nil { + return fmt.Errorf("failed to verify after run job, got error: %v", err) + } + if planner.karmada.Status.APIServerService == nil { + return fmt.Errorf("expected API Server service ref to be set, but got nil") + } + expectedAPIServerName := util.KarmadaAPIServerName(karmada.GetName()) + if planner.karmada.Status.APIServerService.Name != expectedAPIServerName { + return fmt.Errorf("expected API Server service Name to be %s, but got %s", expectedAPIServerName, planner.karmada.Status.APIServerService.Name) + } + + return nil + }, + wantErr: false, + }, + { + name: "AfterRunJob_WithDeInitActionPlanned_AfterRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: &metav1.Time{ + Time: time.Now().Add(-5 * time.Minute), + }, + 
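+					// Together with the deletion timestamp above, the controller
+					// finalizer below marks the CR as terminating, which steers
+					// NewPlannerFor to the DeInit workflow.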
Finalizers: []string{ControllerFinalizerName}, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + action: DeInitAction, + verify: func(_ *operatorv1alpha1.Karmada, planner *Planner, _ Action) error { + conditions := planner.karmada.Status.Conditions + if len(conditions) != 0 { + t.Errorf("expected no conditions, but got %d conditions", len(conditions)) + } + if planner.karmada.Status.SecretRef != nil { + t.Errorf("expected SecretRef to be nil, but got %v", planner.karmada.Status.SecretRef) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client, err := prepJobInCommon(test.karmada) + if err != nil { + t.Fatalf("failed to prep before creating planner, got error: %v", err) + } + + planner, err := NewPlannerFor(test.karmada, client, test.config) + if err != nil { + t.Fatalf("failed to create planner, got error: %v", err) + } + planner.action = test.action + + err = planner.afterRunJob() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if err := test.verify(test.karmada, planner, test.action); err != nil { + t.Errorf("failed to verify the after running job, got error: %v", err) + } + }) + } +} +func TestRunJobErr(t *testing.T) { + name, namespace := "karmada-demo", names.NamespaceDefault + tests := []struct { + name string + karmada *operatorv1alpha1.Karmada + config *rest.Config + jobErr error + wantErr bool + }{ + { + name: "RunJobErr_WithInitActionPlannedAndHostClusterIsLocal_AfterRunJobCompleted", + karmada: &operatorv1alpha1.Karmada{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: operatorv1alpha1.KarmadaSpec{ + Components: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + }, + config: &rest.Config{}, + jobErr: errors.New("test error"), + wantErr: true, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + client, err := prepJobInCommon(test.karmada) + if err != nil { + t.Fatalf("failed to prep before creating planner, got error: %v", err) + } + + planner, err := NewPlannerFor(test.karmada, client, test.config) + if err != nil { + t.Fatalf("failed to create planner, got error: %v", err) + } + + err = planner.runJobErr(test.jobErr) + if err == nil && test.wantErr { + t.Fatalf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if !containsError(err, test.jobErr) { + t.Errorf("expected job error to contain: %v, but got: %v", test.jobErr, err) + } + if err := verifyJobInCommon(planner, metav1.ConditionFalse, test.jobErr.Error(), "Failed"); err != nil { + t.Errorf("failed to verify run job err, got error: %v", err) + } + }) + } +} + +// prepJobInCommon prepares a fake Kubernetes client for testing purposes. +// It creates a new scheme and adds the operatorv1alpha1 types to it. +// A fake client is then built using the provided Karmada object. +func prepJobInCommon(karmada *operatorv1alpha1.Karmada) (client.Client, error) { + // Create a scheme and add operatorv1alpha1 type to it. + scheme := runtime.NewScheme() + err := operatorv1alpha1.AddToScheme(scheme) + if err != nil { + return nil, fmt.Errorf("error adding operatorv1alpha1 to k8s scheme %v", err) + } + + // Create a fake client with the scheme. 
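+	// WithStatusSubresource is required here: without it the fake client
+	// rejects the planner's Client.Status().Update() calls.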
+ client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(karmada).WithStatusSubresource(karmada).Build() + return client, nil +} + +// verifyJobInCommon verifies the conditions of a Karmada job by checking its status conditions. +// It ensures that the job has at least one condition and that its type, status, reason, and message +// match the expected values provided as arguments. +func verifyJobInCommon(planner *Planner, conditionStatus metav1.ConditionStatus, conditionMsg, conditionReason string) error { + conditions := planner.karmada.Status.Conditions + if len(conditions) < 1 { + return fmt.Errorf("expected at least one condition, but got %d", len(conditions)) + } + if conditions[0].Type != string(operatorv1alpha1.Ready) { + return fmt.Errorf("expected condition type to be %s, but got %s", operatorv1alpha1.Ready, conditions[0].Type) + } + if conditions[0].Status != conditionStatus { + return fmt.Errorf("expected condition status to be %s, but got %s", conditionStatus, conditions[0].Status) + } + if conditions[0].Reason != conditionReason { + return fmt.Errorf("expected condition reason to be %s, but got %s", conditionReason, conditions[0].Reason) + } + if conditions[0].Message != conditionMsg { + return fmt.Errorf("expected condition message to be %s, but got %s", conditionMsg, conditions[0].Message) + } + + return nil +} + +// containsError checks if a target error is present within an aggregated error. +// It verifies if the aggregated error is non-nil and if it contains the specified target error. +func containsError(aggErr error, targetErr error) bool { + if aggErr != nil { + if agg, ok := aggErr.(utilerrors.Aggregate); ok { + for _, err := range agg.Errors() { + if err.Error() == targetErr.Error() { + return true + } + } + } + } + return false +} diff --git a/operator/pkg/controlplane/apiserver/apiserver.go b/operator/pkg/controlplane/apiserver/apiserver.go index 5ae253f3910b..feae902c95a5 100644 --- a/operator/pkg/controlplane/apiserver/apiserver.go +++ b/operator/pkg/controlplane/apiserver/apiserver.go @@ -27,7 +27,7 @@ import ( clientsetscheme "k8s.io/client-go/kubernetes/scheme" operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" - "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/controlplane/etcd" "github.com/karmada-io/karmada/operator/pkg/util" "github.com/karmada-io/karmada/operator/pkg/util/apiclient" "github.com/karmada-io/karmada/operator/pkg/util/patcher" @@ -35,7 +35,7 @@ import ( // EnsureKarmadaAPIServer creates karmada apiserver deployment and service resource func EnsureKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaComponents, name, namespace string, featureGates map[string]bool) error { - if err := installKarmadaAPIServer(client, cfg.KarmadaAPIServer, name, namespace, featureGates); err != nil { + if err := installKarmadaAPIServer(client, cfg.KarmadaAPIServer, cfg.Etcd, name, namespace, featureGates); err != nil { return fmt.Errorf("failed to install karmada apiserver, err: %w", err) } @@ -44,29 +44,25 @@ func EnsureKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.Ka // EnsureKarmadaAggregatedAPIServer creates karmada aggregated apiserver deployment and service resource func EnsureKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaComponents, name, namespace string, featureGates map[string]bool) error { - if err := installKarmadaAggregatedAPIServer(client, 
cfg.KarmadaAggregatedAPIServer, name, namespace, featureGates); err != nil { + if err := installKarmadaAggregatedAPIServer(client, cfg.KarmadaAggregatedAPIServer, cfg.Etcd, name, namespace, featureGates); err != nil { return err } return createKarmadaAggregatedAPIServerService(client, name, namespace) } -func installKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAPIServer, name, namespace string, _ map[string]bool) error { +func installKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAPIServer, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, _ map[string]bool) error { apiserverDeploymentBytes, err := util.ParseTemplate(KarmadaApiserverDeployment, struct { - DeploymentName, Namespace, Image, ImagePullPolicy, EtcdClientService string - ServiceSubnet, KarmadaCertsSecret, EtcdCertsSecret string - Replicas *int32 - EtcdListenClientPort int32 + DeploymentName, Namespace, Image, ImagePullPolicy string + ServiceSubnet, KarmadaCertsSecret string + Replicas *int32 }{ - DeploymentName: util.KarmadaAPIServerName(name), - Namespace: namespace, - Image: cfg.Image.Name(), - ImagePullPolicy: string(cfg.ImagePullPolicy), - EtcdClientService: util.KarmadaEtcdClientName(name), - ServiceSubnet: *cfg.ServiceSubnet, - KarmadaCertsSecret: util.KarmadaCertSecretName(name), - EtcdCertsSecret: util.EtcdCertSecretName(name), - Replicas: cfg.Replicas, - EtcdListenClientPort: constants.EtcdListenClientPort, + DeploymentName: util.KarmadaAPIServerName(name), + Namespace: namespace, + Image: cfg.Image.Name(), + ImagePullPolicy: string(cfg.ImagePullPolicy), + ServiceSubnet: *cfg.ServiceSubnet, + KarmadaCertsSecret: util.KarmadaCertSecretName(name), + Replicas: cfg.Replicas, }) if err != nil { return fmt.Errorf("error when parsing karmadaApiserver deployment template: %w", err) @@ -76,8 +72,15 @@ func installKarmadaAPIServer(client clientset.Interface, cfg *operatorv1alpha1.K if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), apiserverDeploymentBytes, apiserverDeployment); err != nil { return fmt.Errorf("error when decoding karmadaApiserver deployment: %w", err) } + + err = etcd.ConfigureClientCredentials(apiserverDeployment, etcdCfg, name, namespace) + if err != nil { + return err + } + patcher.NewPatcher().WithAnnotations(cfg.Annotations).WithLabels(cfg.Labels). - WithExtraArgs(cfg.ExtraArgs).WithResources(cfg.Resources).ForDeployment(apiserverDeployment) + WithExtraArgs(cfg.ExtraArgs).WithExtraVolumeMounts(cfg.ExtraVolumeMounts). 
+ WithExtraVolumes(cfg.ExtraVolumes).WithResources(cfg.Resources).ForDeployment(apiserverDeployment) if err := apiclient.CreateOrUpdateDeployment(client, apiserverDeployment); err != nil { return fmt.Errorf("error when creating deployment for %s, err: %w", apiserverDeployment.Name, err) @@ -111,23 +114,19 @@ func createKarmadaAPIServerService(client clientset.Interface, cfg *operatorv1al return nil } -func installKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAggregatedAPIServer, name, namespace string, featureGates map[string]bool) error { +func installKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operatorv1alpha1.KarmadaAggregatedAPIServer, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, featureGates map[string]bool) error { aggregatedAPIServerDeploymentBytes, err := util.ParseTemplate(KarmadaAggregatedAPIServerDeployment, struct { - DeploymentName, Namespace, Image, ImagePullPolicy, EtcdClientService string - KubeconfigSecret, KarmadaCertsSecret, EtcdCertsSecret string - Replicas *int32 - EtcdListenClientPort int32 + DeploymentName, Namespace, Image, ImagePullPolicy string + KubeconfigSecret, KarmadaCertsSecret string + Replicas *int32 }{ - DeploymentName: util.KarmadaAggregatedAPIServerName(name), - Namespace: namespace, - Image: cfg.Image.Name(), - ImagePullPolicy: string(cfg.ImagePullPolicy), - EtcdClientService: util.KarmadaEtcdClientName(name), - KubeconfigSecret: util.AdminKubeconfigSecretName(name), - KarmadaCertsSecret: util.KarmadaCertSecretName(name), - EtcdCertsSecret: util.EtcdCertSecretName(name), - Replicas: cfg.Replicas, - EtcdListenClientPort: constants.EtcdListenClientPort, + DeploymentName: util.KarmadaAggregatedAPIServerName(name), + Namespace: namespace, + Image: cfg.Image.Name(), + ImagePullPolicy: string(cfg.ImagePullPolicy), + KubeconfigSecret: util.AdminKubeconfigSecretName(name), + KarmadaCertsSecret: util.KarmadaCertSecretName(name), + Replicas: cfg.Replicas, }) if err != nil { return fmt.Errorf("error when parsing karmadaAggregatedAPIServer deployment template: %w", err) @@ -138,6 +137,11 @@ func installKarmadaAggregatedAPIServer(client clientset.Interface, cfg *operator return fmt.Errorf("err when decoding karmadaApiserver deployment: %w", err) } + err = etcd.ConfigureClientCredentials(aggregatedAPIServerDeployment, etcdCfg, name, namespace) + if err != nil { + return err + } + patcher.NewPatcher().WithAnnotations(cfg.Annotations).WithLabels(cfg.Labels). WithExtraArgs(cfg.ExtraArgs).WithFeatureGates(featureGates).WithResources(cfg.Resources).ForDeployment(aggregatedAPIServerDeployment) diff --git a/operator/pkg/controlplane/apiserver/apiserver_test.go b/operator/pkg/controlplane/apiserver/apiserver_test.go new file mode 100644 index 000000000000..6e1446fa7efe --- /dev/null +++ b/operator/pkg/controlplane/apiserver/apiserver_test.go @@ -0,0 +1,454 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package apiserver + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureKarmadaAPIServer(t *testing.T) { + var replicas int32 = 3 + image := "karmada-apiserver-image" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + name := "karmada-apiserver" + namespace := "test-namespace" + serviceSubnet := "10.96.0.0/12" + + cfg := &operatorv1alpha1.KarmadaComponents{ + KarmadaAPIServer: &operatorv1alpha1.KarmadaAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: image}, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ServiceSubnet: ptr.To(serviceSubnet), + ExtraArgs: map[string]string{"cmd1": "arg1", "cmd2": "arg2"}, + }, + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + } + + fakeClient := fakeclientset.NewSimpleClientset() + + err := EnsureKarmadaAPIServer(fakeClient, cfg, name, namespace, map[string]bool{}) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + actions := fakeClient.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d", len(actions)) + } +} + +func TestEnsureKarmadaAggregatedAPIServer(t *testing.T) { + var replicas int32 = 2 + image := "karmada-aggregated-apiserver-image" + imagePullPolicy := corev1.PullIfNotPresent + annotationValues := map[string]string{"annotationKey": "annotationValue"} + labelValues := map[string]string{"labelKey": "labelValue"} + name := "test-agg-server" + namespace := "test-namespace" + + cfg := &operatorv1alpha1.KarmadaComponents{ + KarmadaAggregatedAPIServer: &operatorv1alpha1.KarmadaAggregatedAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: image}, + Replicas: ptr.To[int32](replicas), + Annotations: annotationValues, + Labels: labelValues, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: map[string]string{"cmd1": "arg1", "cmd2": "arg2"}, + }, + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + } + + featureGates := map[string]bool{"FeatureA": true} + + fakeClient := fakeclientset.NewSimpleClientset() + + err := EnsureKarmadaAggregatedAPIServer(fakeClient, cfg, name, namespace, featureGates) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + actions := fakeClient.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d", len(actions)) + } +} + +func TestInstallKarmadaAPIServer(t *testing.T) { + var replicas int32 = 3 + image := "karmada-apiserver-image" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + name := "karmada-apiserver" + namespace := "test-namespace" + serviceSubnet := "10.96.0.0/12" + + // Create fake clientset. 
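+	// The fake clientset records every API request it serves; the assertions
+	// below inspect those recorded actions rather than a live cluster.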
+ fakeClient := fakeclientset.NewSimpleClientset() + + // Define a valid KarmadaAPIServer configuration. + cfg := &operatorv1alpha1.KarmadaAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: image}, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ServiceSubnet: ptr.To(serviceSubnet), + ExtraArgs: map[string]string{"cmd1": "arg1", "cmd2": "arg2"}, + } + etcdCfg := &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + } + featureGates := map[string]bool{"FeatureA": true} + + // Call the function under test. + err := installKarmadaAPIServer(fakeClient, cfg, etcdCfg, name, namespace, featureGates) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + deployment, err := verifyDeploymentCreation(fakeClient, &replicas, imagePullPolicy, cfg.ExtraArgs, name, namespace, image, util.KarmadaAPIServerName(name)) + if err != nil { + t.Fatalf("failed to verify karmada apiserver correct deployment creation correct details: %v", err) + } + + err = verifyAPIServerDeploymentAdditionalDetails(deployment, name, serviceSubnet) + if err != nil { + t.Errorf("failed to verify karmada apiserver additional deployment details: %v", err) + } +} + +func TestCreateKarmadaAPIServerService(t *testing.T) { + // Initialize fake clientset. + client := fakeclientset.NewSimpleClientset() + + // Define inputs. + name := "test-apiserver" + namespace := "test-namespace" + cfg := &operatorv1alpha1.KarmadaAPIServer{ + ServiceAnnotations: map[string]string{"annotationKey": "annotationValue"}, + } + + // Call the function under test. + err := createKarmadaAPIServerService(client, cfg, name, namespace) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + // Ensure the expected action (service creation) occurred. + actions := client.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d actions", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (Service). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + if createAction.GetResource().Resource != "services" { + t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource) + } + + // Validate the created service object. + service := createAction.GetObject().(*corev1.Service) + expectedServiceName := util.KarmadaAPIServerName(name) + if service.Name != expectedServiceName { + t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name) + } + + if service.Namespace != namespace { + t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace) + } + + if _, exists := service.Annotations["annotationKey"]; !exists { + t.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing") + } +} + +func TestInstallKarmadaAggregatedAPIServer(t *testing.T) { + var replicas int32 = 2 + image := "karmada-aggregated-apiserver-image" + imagePullPolicy := corev1.PullIfNotPresent + annotationValues := map[string]string{"annotationKey": "annotationValue"} + labelValues := map[string]string{"labelKey": "labelValue"} + name := "test-agg-server" + namespace := "test-namespace" + + // Use fake clientset. + fakeClient := fakeclientset.NewSimpleClientset() + + // Define valid inputs. 
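+	// The LocalEtcd config defined below is handed to
+	// etcd.ConfigureClientCredentials, which is expected to mount the etcd
+	// cert secret into the deployment; the verifier checks for that volume.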
+ cfg := &operatorv1alpha1.KarmadaAggregatedAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: image}, + Replicas: ptr.To[int32](replicas), + Annotations: annotationValues, + Labels: labelValues, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: map[string]string{"cmd1": "arg1", "cmd2": "arg2"}, + } + + featureGates := map[string]bool{"FeatureA": true} + etcdCfg := &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + } + err := installKarmadaAggregatedAPIServer(fakeClient, cfg, etcdCfg, name, namespace, featureGates) + if err != nil { + t.Fatalf("Failed to install Karmada Aggregated API Server: %v", err) + } + + deployment, err := verifyDeploymentCreation(fakeClient, &replicas, imagePullPolicy, cfg.ExtraArgs, name, namespace, image, util.KarmadaAggregatedAPIServerName(name)) + if err != nil { + t.Fatalf("failed to verify karmada aggregated apiserver deployment creation correct details: %v", err) + } + + err = verifyAggregatedAPIServerDeploymentAdditionalDetails(featureGates, deployment, name) + if err != nil { + t.Errorf("failed to verify karmada aggregated apiserver additional deployment details: %v", err) + } +} + +func TestCreateKarmadaAggregatedAPIServerService(t *testing.T) { + // Initialize fake clientset. + client := fakeclientset.NewSimpleClientset() + + // Define inputs. + name := "test-agg-server" + namespace := "test-namespace" + + // Call the function under test. + err := createKarmadaAggregatedAPIServerService(client, name, namespace) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + // Ensure the expected action (service creation) occurred. + actions := client.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d actions", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (Service). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + if createAction.GetResource().Resource != "services" { + t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource) + } + + // Validate the created service object. + service := createAction.GetObject().(*corev1.Service) + expectedServiceName := util.KarmadaAggregatedAPIServerName(name) + if service.Name != expectedServiceName { + t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name) + } + + if service.Namespace != namespace { + t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace) + } +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +// verifyDeploymentCreation verifies the creation of a Kubernetes deployment +// based on the given parameters. It ensures that the deployment has the correct +// number of replicas, image pull policy, extra arguments, and labels, as well +// as the correct image for the Karmada API server. +func verifyDeploymentCreation(client *fakeclientset.Clientset, replicas *int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, expectedDeploymentName string) (*appsv1.Deployment, error) { + // Assert that a Deployment was created. 
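+	// The install helpers issue exactly one deployment create; the matching
+	// service is created by a separate helper, hence the single-action check.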
+ actions := client.Actions() + if len(actions) != 1 { + return nil, fmt.Errorf("expected exactly 1 action either create or update, but got %d actions", len(actions)) + } + + // Check that the action was a Deployment creation. + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + return nil, fmt.Errorf("expected a CreateAction, but got %T", actions[0]) + } + + // Check that the action was performed on the correct resource. + if createAction.GetResource().Resource != "deployments" { + return nil, fmt.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource) + } + + deployment := createAction.GetObject().(*appsv1.Deployment) + err := verifyDeploymentDetails(deployment, replicas, imagePullPolicy, extraArgs, name, namespace, image, expectedDeploymentName) + if err != nil { + return nil, err + } + + return deployment, nil +} + +// verifyDeploymentDetails ensures that the specified deployment contains the +// correct configuration for replicas, image pull policy, extra args, and image. +// It validates that the deployment matches the expected Karmada API server settings. +func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas *int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, expectedDeploymentName string) error { + if deployment.Name != expectedDeploymentName { + return fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name) + } + + expectedNamespace := "test-namespace" + if deployment.Namespace != expectedNamespace { + return fmt.Errorf("expected deployment namespace '%s', but got '%s'", expectedNamespace, deployment.Namespace) + } + + if _, exists := deployment.Annotations["annotationKey"]; !exists { + return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing") + } + + if _, exists := deployment.Labels["labelKey"]; !exists { + return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing") + } + + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != *replicas { + return fmt.Errorf("expected replicas to be %d, but got %d", replicas, *deployment.Spec.Replicas) + } + + containers := deployment.Spec.Template.Spec.Containers + if len(containers) != 1 { + return fmt.Errorf("expected exactly 1 container, but got %d", len(containers)) + } + + expectedImage := fmt.Sprintf(":%s", image) + container := containers[0] + if container.Image != expectedImage { + return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image) + } + + if container.ImagePullPolicy != imagePullPolicy { + return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy) + } + + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + + etcdServersArg := fmt.Sprintf("https://%s.%s.svc.cluster.local:%d,", util.KarmadaEtcdClientName(name), namespace, constants.EtcdListenClientPort) + etcdServersArg = fmt.Sprintf("--etcd-servers=%s", etcdServersArg[:len(etcdServersArg)-1]) + if !contains(container.Command, etcdServersArg) { + return fmt.Errorf("etcd servers argument '%s' not found in container command", etcdServersArg) + } + + return nil +} + +// verifyAggregatedAPIServerDeploymentAdditionalDetails validates the 
additional
+// details of the Karmada Aggregated API server deployment: the assembled
+// --feature-gates argument and the secret volumes mounted for the admin
+// kubeconfig, the Karmada certificates, and the etcd certificates.
+func verifyAggregatedAPIServerDeploymentAdditionalDetails(featureGates map[string]bool, deployment *appsv1.Deployment, expectedDeploymentName string) error {
+	var featureGatesArg string
+	for key, value := range featureGates {
+		featureGatesArg += fmt.Sprintf("%s=%t,", key, value)
+	}
+	featureGatesArg = fmt.Sprintf("--feature-gates=%s", featureGatesArg[:len(featureGatesArg)-1])
+	if !contains(deployment.Spec.Template.Spec.Containers[0].Command, featureGatesArg) {
+		return fmt.Errorf("expected container commands to include '%s', but it was missing", featureGatesArg)
+	}
+
+	if len(deployment.Spec.Template.Spec.Volumes) != 3 {
+		return fmt.Errorf("expected 3 volumes, but found %d", len(deployment.Spec.Template.Spec.Volumes))
+	}
+
+	var extractedSecrets []string
+	for _, volume := range deployment.Spec.Template.Spec.Volumes {
+		extractedSecrets = append(extractedSecrets, volume.Secret.SecretName)
+	}
+	expectedSecrets := []string{util.AdminKubeconfigSecretName(expectedDeploymentName), util.KarmadaCertSecretName(expectedDeploymentName), util.EtcdCertSecretName(expectedDeploymentName)}
+	for _, expectedSecret := range expectedSecrets {
+		if !contains(extractedSecrets, expectedSecret) {
+			return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret)
+		}
+	}
+
+	return nil
+}
+
+// verifyAPIServerDeploymentAdditionalDetails checks the additional configuration
+// details of a Kubernetes deployment for the Karmada API server. It validates the
+// service cluster IP range, the number of volumes, and ensures that the required
+// secret volumes are mounted in the deployment.
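+// Unlike the aggregated apiserver above, no kubeconfig volume is expected
+// here, so only the Karmada cert and etcd cert secrets should be mounted.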
+func verifyAPIServerDeploymentAdditionalDetails(deployment *appsv1.Deployment, expectedDeploymentName, serviceSubnet string) error { + serviceClusterIPRangeArg := fmt.Sprintf("--service-cluster-ip-range=%s", serviceSubnet) + if !contains(deployment.Spec.Template.Spec.Containers[0].Command, serviceClusterIPRangeArg) { + return fmt.Errorf("service cluster IP range argument '%s' not found in container command", serviceClusterIPRangeArg) + } + + if len(deployment.Spec.Template.Spec.Volumes) != 2 { + return fmt.Errorf("expected 2 volumes, but found %d", len(deployment.Spec.Template.Spec.Volumes)) + } + + var extractedSecrets []string + for _, volume := range deployment.Spec.Template.Spec.Volumes { + extractedSecrets = append(extractedSecrets, volume.Secret.SecretName) + } + expectedSecrets := []string{util.KarmadaCertSecretName(expectedDeploymentName), util.EtcdCertSecretName(expectedDeploymentName)} + for _, expectedSecret := range expectedSecrets { + if !contains(extractedSecrets, expectedSecret) { + return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret) + } + } + + return nil +} diff --git a/operator/pkg/controlplane/apiserver/mainfests.go b/operator/pkg/controlplane/apiserver/manifests.go similarity index 85% rename from operator/pkg/controlplane/apiserver/mainfests.go rename to operator/pkg/controlplane/apiserver/manifests.go index 0cdea33afbd9..fb1f99c4e0c4 100644 --- a/operator/pkg/controlplane/apiserver/mainfests.go +++ b/operator/pkg/controlplane/apiserver/manifests.go @@ -50,14 +50,7 @@ spec: - --disable-admission-plugins=StorageObjectInUseProtection,ServiceAccount - --enable-admission-plugins=NodeRestriction - --enable-bootstrap-token-auth=true - - --etcd-cafile=/etc/etcd/pki/etcd-ca.crt - - --etcd-certfile=/etc/etcd/pki/etcd-client.crt - - --etcd-keyfile=/etc/etcd/pki/etcd-client.key - - --etcd-servers=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - --bind-address=0.0.0.0 - - --kubelet-client-certificate=/etc/karmada/pki/karmada.crt - - --kubelet-client-key=/etc/karmada/pki/karmada.key - - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname - --secure-port=5443 - --service-account-issuer=https://kubernetes.default.svc.cluster.local - --service-account-key-file=/etc/karmada/pki/karmada.key @@ -115,17 +108,11 @@ spec: - mountPath: /etc/karmada/pki name: apiserver-cert readOnly: true - - mountPath: /etc/etcd/pki - name: etcd-cert - readOnly: true priorityClassName: system-node-critical volumes: - name: apiserver-cert secret: secretName: {{ .KarmadaCertsSecret }} - - name: etcd-cert - secret: - secretName: {{ .EtcdCertsSecret }} ` // KarmadaApiserverService is karmada apiserver service manifest @@ -179,10 +166,6 @@ spec: - --kubeconfig=/etc/karmada/kubeconfig - --authentication-kubeconfig=/etc/karmada/kubeconfig - --authorization-kubeconfig=/etc/karmada/kubeconfig - - --etcd-cafile=/etc/etcd/pki/etcd-ca.crt - - --etcd-certfile=/etc/etcd/pki/etcd-client.crt - - --etcd-keyfile=/etc/etcd/pki/etcd-client.key - - --etcd-servers=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - --tls-cert-file=/etc/karmada/pki/karmada.crt - --tls-private-key-file=/etc/karmada/pki/karmada.key - --tls-min-version=VersionTLS13 @@ -193,9 +176,6 @@ spec: - mountPath: /etc/karmada/kubeconfig name: kubeconfig subPath: kubeconfig - - mountPath: /etc/etcd/pki - name: etcd-cert - readOnly: true - mountPath: /etc/karmada/pki name: apiserver-cert readOnly: true @@ -206,9 +186,6 
@@ spec: - name: apiserver-cert secret: secretName: {{ .KarmadaCertsSecret }} - - name: etcd-cert - secret: - secretName: {{ .EtcdCertsSecret }} ` // KarmadaAggregatedAPIServerService is karmada aggregated APIServer Service manifest KarmadaAggregatedAPIServerService = ` diff --git a/operator/pkg/controlplane/controlplane.go b/operator/pkg/controlplane/controlplane.go index 007788ab4947..693d58bd6770 100644 --- a/operator/pkg/controlplane/controlplane.go +++ b/operator/pkg/controlplane/controlplane.go @@ -140,17 +140,18 @@ func getKarmadaControllerManagerManifest(name, namespace string, featureGates ma func getKarmadaSchedulerManifest(name, namespace string, featureGates map[string]bool, cfg *operatorv1alpha1.KarmadaScheduler) (*appsv1.Deployment, error) { karmadaSchedulerBytes, err := util.ParseTemplate(KarmadaSchedulerDeployment, struct { - Replicas *int32 - DeploymentName, Namespace, SystemNamespace string - Image, ImagePullPolicy, KubeconfigSecret string + Replicas *int32 + DeploymentName, Namespace, SystemNamespace string + Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string }{ - DeploymentName: util.KarmadaSchedulerName(name), - Namespace: namespace, - SystemNamespace: constants.KarmadaSystemNamespace, - Image: cfg.Image.Name(), - ImagePullPolicy: string(cfg.ImagePullPolicy), - KubeconfigSecret: util.AdminKubeconfigSecretName(name), - Replicas: cfg.Replicas, + DeploymentName: util.KarmadaSchedulerName(name), + Namespace: namespace, + SystemNamespace: constants.KarmadaSystemNamespace, + Image: cfg.Image.Name(), + ImagePullPolicy: string(cfg.ImagePullPolicy), + KubeconfigSecret: util.AdminKubeconfigSecretName(name), + KarmadaCertsSecret: util.KarmadaCertSecretName(name), + Replicas: cfg.Replicas, }) if err != nil { return nil, fmt.Errorf("error when parsing karmada-scheduler deployment template: %w", err) @@ -168,17 +169,18 @@ func getKarmadaSchedulerManifest(name, namespace string, featureGates map[string func getKarmadaDeschedulerManifest(name, namespace string, featureGates map[string]bool, cfg *operatorv1alpha1.KarmadaDescheduler) (*appsv1.Deployment, error) { karmadaDeschedulerBytes, err := util.ParseTemplate(KarmadaDeschedulerDeployment, struct { - Replicas *int32 - DeploymentName, Namespace, SystemNamespace string - Image, ImagePullPolicy, KubeconfigSecret string + Replicas *int32 + DeploymentName, Namespace, SystemNamespace string + Image, ImagePullPolicy, KubeconfigSecret, KarmadaCertsSecret string }{ - DeploymentName: util.KarmadaDeschedulerName(name), - Namespace: namespace, - SystemNamespace: constants.KarmadaSystemNamespace, - Image: cfg.Image.Name(), - ImagePullPolicy: string(cfg.ImagePullPolicy), - KubeconfigSecret: util.AdminKubeconfigSecretName(name), - Replicas: cfg.Replicas, + DeploymentName: util.KarmadaDeschedulerName(name), + Namespace: namespace, + SystemNamespace: constants.KarmadaSystemNamespace, + Image: cfg.Image.Name(), + ImagePullPolicy: string(cfg.ImagePullPolicy), + KubeconfigSecret: util.AdminKubeconfigSecretName(name), + KarmadaCertsSecret: util.KarmadaCertSecretName(name), + Replicas: cfg.Replicas, }) if err != nil { return nil, fmt.Errorf("error when parsing karmada-descheduler deployment template: %w", err) diff --git a/operator/pkg/controlplane/controlplane_test.go b/operator/pkg/controlplane/controlplane_test.go new file mode 100644 index 000000000000..4ccd14df9a04 --- /dev/null +++ b/operator/pkg/controlplane/controlplane_test.go @@ -0,0 +1,454 @@ +/* +Copyright 2024 The Karmada Authors. 
+Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controlplane + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureAllControlPlaneComponents(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaComponents{ + KubeControllerManager: &operatorv1alpha1.KubeControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "registry.k8s.io/kube-controller-manager", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaControllerManager: &operatorv1alpha1.KarmadaControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-controller-manager", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaScheduler: &operatorv1alpha1.KarmadaScheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-scheduler", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + KarmadaDescheduler: &operatorv1alpha1.KarmadaDescheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: "docker.io/karmada/karmada-descheduler", + ImageTag: "latest", + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + }, + } + + fakeClient := fakeclientset.NewSimpleClientset() + + components := []string{ + constants.KubeControllerManagerComponent, + constants.KarmadaControllerManagerComponent, + constants.KarmadaSchedulerComponent, + constants.KarmadaDeschedulerComponent, + } + + for _, component := range components { + err := EnsureControlPlaneComponent(component, name, namespace, map[string]bool{}, fakeClient, cfg) + if err != nil { + t.Fatalf("failed to ensure %s controlplane component: %v", 
component, err) + } + } + + actions := fakeClient.Actions() + if len(actions) != len(components) { + t.Fatalf("expected %d actions, but got %d", len(components), len(actions)) + } + + for _, action := range actions { + createAction, ok := action.(coretesting.CreateAction) + if !ok { + t.Errorf("expected CreateAction, but got %T", action) + } + + if createAction.GetResource().Resource != "deployments" { + t.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource) + } + } +} + +func TestGetKubeControllerManagerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "registry.k8s.io/kube-controller-manager", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KubeControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + deployment, err := getKubeControllerManagerManifest(name, namespace, cfg) + if err != nil { + t.Fatalf("failed to get kube controller manager manifest: %v", err) + } + + deployment, _, err = verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KubeControllerManagerName(name), + ) + if err != nil { + t.Errorf("failed to verify kube controller manager deployment details: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify kube controller manager secrets: %v", err) + } +} + +func TestGetKarmadaControllerManagerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-controller-manager", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaControllerManager{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaControllerManagerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada controller manager manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaControllerManagerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada controller manager deployment details: %v", err) + } + + err = verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada controller manager feature gates: %v", err) + } + + err = 
verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada controller manager system namespace: %v", err) + } + + expectedSecrets := []string{util.AdminKubeconfigSecretName(name)} + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada controller manager secrets: %v", err) + } +} + +func TestGetKarmadaSchedulerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-scheduler", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaScheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaSchedulerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada scheduler manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaSchedulerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada scheduler deployment details: %v", err) + } + + err = verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada scheduler feature gates: %v", err) + } + + err = verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada scheduler system namespace: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada scheduler secrets: %v", err) + } +} + +func TestGetKarmadaDeschedulerManifest(t *testing.T) { + var replicas int32 = 2 + name, namespace := "karmada-demo", "test" + image, imageTag := "docker.io/karmada/karmada-descheduler", "latest" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaDescheduler{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + featureGates := map[string]bool{"FeatureA": true} + + deployment, err := getKarmadaDeschedulerManifest(name, namespace, featureGates, cfg) + if err != nil { + t.Fatalf("failed to get karmada descheduler manifest: %v", err) + } + + deployment, container, err := verifyDeploymentDetails( + deployment, replicas, imagePullPolicy, extraArgs, namespace, + image, imageTag, util.KarmadaDeschedulerName(name), + ) + if err != nil { + t.Errorf("failed to verify karmada descheduler deployment details: %v", err) + } + + err 
= verifyFeatureGates(container, featureGates) + if err != nil { + t.Errorf("failed to verify karmada descheduler feature gates: %v", err) + } + + err = verifySystemNamespace(container) + if err != nil { + t.Errorf("failed to verify karmada descheduler system namespace: %v", err) + } + + expectedSecrets := []string{ + util.AdminKubeconfigSecretName(name), + util.KarmadaCertSecretName(name), + } + err = verifySecrets(deployment, expectedSecrets) + if err != nil { + t.Errorf("failed to verify karmada descheduler secrets: %v", err) + } +} + +// verifyDeploymentDetails ensures that the specified deployment contains the +// correct configuration for replicas, image pull policy, extra args, and image. +// It validates that the deployment matches the expected Karmada Controlplane settings. +// It could be against Kube Controller Manager, Karmada Controller Manager, Karmada Scheduler, +// and Karmada Descheduler. +func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, namespace, image, imageTag, expectedDeploymentName string) (*appsv1.Deployment, *corev1.Container, error) { + if deployment.Name != expectedDeploymentName { + return nil, nil, fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name) + } + + if deployment.Namespace != namespace { + return nil, nil, fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace) + } + + if _, exists := deployment.Annotations["annotationKey"]; !exists { + return nil, nil, fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing") + } + + if _, exists := deployment.Labels["labelKey"]; !exists { + return nil, nil, fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing") + } + + if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas != replicas { + return nil, nil, fmt.Errorf("expected replicas to be %d, but got %d", replicas, deployment.Spec.Replicas) + } + + containers := deployment.Spec.Template.Spec.Containers + if len(containers) != 1 { + return nil, nil, fmt.Errorf("expected exactly 1 container, but got %d", len(containers)) + } + + expectedImage := fmt.Sprintf("%s:%s", image, imageTag) + container := containers[0] + if container.Image != expectedImage { + return nil, nil, fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image) + } + + if container.ImagePullPolicy != imagePullPolicy { + return nil, nil, fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy) + } + + err := verifyExtraArgs(&container, extraArgs) + if err != nil { + return nil, nil, fmt.Errorf("failed to verify extra args: %v", err) + } + + return deployment, &container, nil +} + +// verifySystemNamespace validates that expected system namespace is present in the container commands. +func verifySystemNamespace(container *corev1.Container) error { + leaderElectResourceSystemNamespaceArg := fmt.Sprintf("--leader-elect-resource-namespace=%s", constants.KarmadaSystemNamespace) + if !contains(container.Command, leaderElectResourceSystemNamespaceArg) { + return fmt.Errorf("leader elect resource namespace argument '%s' not found in container command with value %s", leaderElectResourceSystemNamespaceArg, constants.KarmadaSystemNamespace) + } + return nil +} + +// verifySecrets validates that the expected secrets are present in the Deployment's volumes. 
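+// It assumes every volume is secret-backed, which holds for all of the
+// control-plane deployments rendered by these manifests.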
+func verifySecrets(deployment *appsv1.Deployment, expectedSecrets []string) error { + var extractedSecrets []string + for _, volume := range deployment.Spec.Template.Spec.Volumes { + extractedSecrets = append(extractedSecrets, volume.Secret.SecretName) + } + for _, expectedSecret := range expectedSecrets { + if !contains(extractedSecrets, expectedSecret) { + return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret) + } + } + return nil +} + +// verifyExtraArgs checks that the container command includes the extra arguments. +func verifyExtraArgs(container *corev1.Container, extraArgs map[string]string) error { + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + return nil +} + +// verifyFeatureGates ensures the container's command includes the specified feature gates. +func verifyFeatureGates(container *corev1.Container, featureGates map[string]bool) error { + var featureGatesArg string + for key, value := range featureGates { + featureGatesArg += fmt.Sprintf("%s=%t,", key, value) + } + featureGatesArg = fmt.Sprintf("--feature-gates=%s", featureGatesArg[:len(featureGatesArg)-1]) + if !contains(container.Command, featureGatesArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", featureGatesArg) + } + return nil +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/controlplane/etcd/etcd_test.go b/operator/pkg/controlplane/etcd/etcd_test.go new file mode 100644 index 000000000000..4df0d60c7a92 --- /dev/null +++ b/operator/pkg/controlplane/etcd/etcd_test.go @@ -0,0 +1,402 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package etcd + +import ( + "fmt" + "strconv" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/intstr" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureKarmadaEtcd(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "registry.k8s.io/etcd", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + + cfg := &operatorv1alpha1.LocalEtcd{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + } + + // Create fake clientset. + fakeClient := fakeclientset.NewSimpleClientset() + + err := EnsureKarmadaEtcd(fakeClient, cfg, name, namespace) + if err != nil { + t.Fatalf("expected no error, but got: %v", err) + } + + actions := fakeClient.Actions() + if len(actions) != 3 { + t.Fatalf("expected 3 actions, but got %d", len(actions)) + } +} + +func TestInstallKarmadaEtcd(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "registry.k8s.io/etcd", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + + // Define a valid Etcd configuration. + cfg := &operatorv1alpha1.LocalEtcd{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + } + + // Create fake clientset. + fakeClient := fakeclientset.NewSimpleClientset() + + err := installKarmadaEtcd(fakeClient, name, namespace, cfg) + if err != nil { + t.Fatalf("failed to install karmada etcd, got: %v", err) + } + + err = verifyStatefulSetCreation( + fakeClient, replicas, imagePullPolicy, name, namespace, image, imageTag, + ) + if err != nil { + t.Fatalf("failed to verify statefulset creation: %v", err) + } +} + +func TestCreateEtcdService(t *testing.T) { + // Define inputs. + name := "karmada-demo" + namespace := "test" + + // Initialize fake clientset. + client := fakeclientset.NewSimpleClientset() + + err := createEtcdService(client, name, namespace) + if err != nil { + t.Fatalf("failed to create etcd service %v", err) + } + + // Ensure the expected actions are two creations for etcd peer and client services. + actions := client.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d actions", len(actions)) + } + + // Validate the actions is of type CreateAction and it's for the correct resource (Service). 
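+	// createEtcdService creates the etcd peer service and the etcd client
+	// service in one call, so both recorded actions must be service creates.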
+	for i, action := range actions {
+		createAction, ok := action.(coretesting.CreateAction)
+		if !ok {
+			t.Fatalf("expected CreateAction, but got %T at index %d", action, i)
+		}
+
+		if createAction.GetResource().Resource != "services" {
+			t.Fatalf("expected action on 'services', but got '%s' at action index %d", createAction.GetResource().Resource, i)
+		}
+
+		service := createAction.GetObject().(*corev1.Service)
+
+		if service.Name != util.KarmadaEtcdName(name) && service.Name != util.KarmadaEtcdClientName(name) {
+			t.Fatalf("expected created actions to be performed on etcd peer and client services, but found one on: %s", service.Name)
+		}
+
+		if service.Namespace != namespace {
+			t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+		}
+
+		if service.Name == util.KarmadaEtcdName(name) {
+			peerServicePortsExpected := []corev1.ServicePort{
+				{
+					Name:     "client",
+					Port:     constants.EtcdListenClientPort,
+					Protocol: corev1.ProtocolTCP,
+					TargetPort: intstr.IntOrString{
+						Type:   intstr.Int,
+						IntVal: constants.EtcdListenClientPort,
+					},
+				},
+				{
+					Name:     "server",
+					Port:     constants.EtcdListenPeerPort,
+					Protocol: corev1.ProtocolTCP,
+					TargetPort: intstr.IntOrString{
+						Type:   intstr.Int,
+						IntVal: constants.EtcdListenPeerPort,
+					},
+				},
+			}
+			err := verifyEtcdPeerOrClientService(service, peerServicePortsExpected)
+			if err != nil {
+				t.Errorf("failed to verify etcd peer service: %v", err)
+			}
+		}
+
+		if service.Name == util.KarmadaEtcdClientName(name) {
+			clientServicePortsExpected := []corev1.ServicePort{
+				{
+					Name:     "client",
+					Port:     constants.EtcdListenClientPort,
+					Protocol: corev1.ProtocolTCP,
+					TargetPort: intstr.IntOrString{
+						Type:   intstr.Int,
+						IntVal: constants.EtcdListenClientPort,
+					},
+				},
+			}
+			err := verifyEtcdPeerOrClientService(service, clientServicePortsExpected)
+			if err != nil {
+				t.Errorf("failed to verify etcd client service: %v", err)
+			}
+		}
+	}
+}
+
+// verifyStatefulSetCreation asserts that a StatefulSet was created in the given clientset.
+// It checks that exactly one action was recorded, verifies that it is a creation action for a StatefulSet,
+// and then validates the details of the created StatefulSet against the expected parameters.
+func verifyStatefulSetCreation(client *fakeclientset.Clientset, replicas int32, imagePullPolicy corev1.PullPolicy, name, namespace, image, imageTag string) error {
+	// Assert that a StatefulSet was created.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		return fmt.Errorf("expected exactly 1 action (create or update), but got %d actions", len(actions))
+	}
+
+	// Check that the action was a StatefulSet creation.
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		return fmt.Errorf("expected a CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "statefulsets" {
+		return fmt.Errorf("expected action on 'statefulsets', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	statefulSet := createAction.GetObject().(*appsv1.StatefulSet)
+	return verifyStatefulSetDetails(
+		statefulSet, replicas, imagePullPolicy, name, namespace, image, imageTag,
+	)
+}
+
+// verifyStatefulSetDetails validates the details of a StatefulSet against the expected parameters.
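+// It covers the object metadata, the replica count, the single etcd container,
+// and the generated command-line flags via the helpers that follow.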
+func verifyStatefulSetDetails(statefulSet *appsv1.StatefulSet, replicas int32, imagePullPolicy corev1.PullPolicy, name, namespace, image, imageTag string) error {
+	expectedStatefulsetName := util.KarmadaEtcdName(name)
+	if statefulSet.Name != expectedStatefulsetName {
+		return fmt.Errorf("expected statefulset name '%s', but got '%s'", expectedStatefulsetName, statefulSet.Name)
+	}
+
+	if statefulSet.Namespace != namespace {
+		return fmt.Errorf("expected statefulset namespace '%s', but got '%s'", namespace, statefulSet.Namespace)
+	}
+
+	if _, exists := statefulSet.Annotations["annotationKey"]; !exists {
+		return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing")
+	}
+
+	if _, exists := statefulSet.Labels["labelKey"]; !exists {
+		return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing")
+	}
+
+	if statefulSet.Spec.Replicas == nil {
+		return fmt.Errorf("expected replicas to be %d, but got nil", replicas)
+	}
+	if *statefulSet.Spec.Replicas != replicas {
+		return fmt.Errorf("expected replicas to be %d, but got %d", replicas, *statefulSet.Spec.Replicas)
+	}
+
+	containers := statefulSet.Spec.Template.Spec.Containers
+	if len(containers) != 1 {
+		return fmt.Errorf("expected exactly 1 container, but got %d", len(containers))
+	}
+	container := containers[0]
+
+	expectedImage := fmt.Sprintf("%s:%s", image, imageTag)
+	if container.Image != expectedImage {
+		return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image)
+	}
+
+	if container.ImagePullPolicy != imagePullPolicy {
+		return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy)
+	}
+
+	err := verifyEtcdServers(&container, name, namespace)
+	if err != nil {
+		return fmt.Errorf("failed to verify etcd servers: %v", err)
+	}
+
+	err = verifySecrets(statefulSet, name)
+	if err != nil {
+		return fmt.Errorf("failed to verify secrets: %v", err)
+	}
+
+	err = verifyVolumeMounts(&container)
+	if err != nil {
+		return fmt.Errorf("failed to verify volume mounts: %v", err)
+	}
+
+	err = verifyInitialClusters(&container, replicas, name, namespace)
+	if err != nil {
+		return fmt.Errorf("failed to verify initial clusters: %v", err)
+	}
+
+	err = verifyEtcdCipherSuite(&container)
+	if err != nil {
+		return fmt.Errorf("failed to verify etcd cipher suite: %v", err)
+	}
+
+	return nil
+}
+
+// verifyEtcdServers checks that the container command includes the correct etcd server argument.
+func verifyEtcdServers(container *corev1.Container, name, namespace string) error {
+	etcdServersArg := fmt.Sprintf("https://%s.%s.svc.cluster.local:%d,", util.KarmadaEtcdClientName(name), namespace, constants.EtcdListenClientPort)
+	etcdServersArg = fmt.Sprintf("--advertise-client-urls=%s", etcdServersArg[:len(etcdServersArg)-1])
+	if !contains(container.Command, etcdServersArg) {
+		return fmt.Errorf("etcd servers argument '%s' not found in container command", etcdServersArg)
+	}
+
+	return nil
+}
+
+// verifySecrets validates that the expected secrets are present in the StatefulSet's volumes.
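+// The operator mounts the etcd certificate secret (util.EtcdCertSecretName)
+// into the StatefulSet's pod template, which is what this helper asserts.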
+func verifySecrets(statefulSet *appsv1.StatefulSet, name string) error { + var extractedSecrets []string + for _, volume := range statefulSet.Spec.Template.Spec.Volumes { + extractedSecrets = append(extractedSecrets, volume.Secret.SecretName) + } + expectedSecrets := []string{util.EtcdCertSecretName(name)} + for _, expectedSecret := range expectedSecrets { + if !contains(extractedSecrets, expectedSecret) { + return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret) + } + } + + return nil +} + +// verifyVolumeMounts checks that the expected volume mounts are present in the container. +func verifyVolumeMounts(container *corev1.Container) error { + var extractedVolumeMounts []string + for _, volumeMount := range container.VolumeMounts { + extractedVolumeMounts = append(extractedVolumeMounts, volumeMount.Name) + } + expectedVolumeMounts := []string{constants.EtcdDataVolumeName} + for _, expectedVolumeMount := range expectedVolumeMounts { + if !contains(extractedVolumeMounts, expectedVolumeMount) { + return fmt.Errorf("expected volume mount '%s' not found in extracted volume mounts", expectedVolumeMount) + } + } + + return nil +} + +// verifyInitialClusters validates that the container command includes the correct initial cluster argument. +func verifyInitialClusters(container *corev1.Container, replicas int32, name, namespace string) error { + expectedInitialClusters := make([]string, replicas) + for i := 0; i < int(replicas); i++ { + memberName := fmt.Sprintf("%s-%d", util.KarmadaEtcdName(name), i) + memberPeerURL := fmt.Sprintf("http://%s.%s.%s.svc.cluster.local:%v", + memberName, + util.KarmadaEtcdName(name), + namespace, + constants.EtcdListenPeerPort, + ) + expectedInitialClusters[i] = fmt.Sprintf("%s=%s", memberName, memberPeerURL) + } + initialClustersArg := fmt.Sprintf("--initial-cluster=%s", strings.Join(expectedInitialClusters, ",")) + if !contains(container.Command, initialClustersArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", initialClustersArg) + } + + return nil +} + +// verifyEtcdCipherSuite checks that the container command includes the correct cipher suites argument. +func verifyEtcdCipherSuite(container *corev1.Container) error { + etcdCipherSuitesArg := fmt.Sprintf("--cipher-suites=%s", genEtcdCipherSuites()) + if !contains(container.Command, etcdCipherSuitesArg) { + return fmt.Errorf("the cipher suites argument '%s' is missing from the container command", etcdCipherSuitesArg) + } + + for _, command := range container.Command { + if strings.HasPrefix(command, "--listen-client-urls") && !strings.HasSuffix(command, strconv.Itoa(constants.EtcdListenClientPort)) { + return fmt.Errorf("expected '--listen-client-urls' command should end with %d", constants.EtcdListenClientPort) + } + + if strings.HasPrefix(command, "--listen-peer-urls") && !strings.HasSuffix(command, strconv.Itoa(constants.EtcdListenPeerPort)) { + return fmt.Errorf("expected '--listen-peer-urls' command should end with %d", constants.EtcdListenPeerPort) + } + } + + return nil +} + +// verifyEtcdPeerOrClientService verifies that the expected ports are present in the etcd peer or client service. 
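+// Ports are compared by struct equality, so name, port, protocol, and target
+// port must all match exactly.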
+func verifyEtcdPeerOrClientService(service *corev1.Service, expectedPorts []corev1.ServicePort) error { + for _, servicePortExpected := range expectedPorts { + found := false + for _, port := range service.Spec.Ports { + if port == servicePortExpected { + found = true + } + } + if !found { + return fmt.Errorf("expected port %v isn't found in etcd peer service ports", servicePortExpected) + } + } + + return nil +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/controlplane/etcd/mainfests.go b/operator/pkg/controlplane/etcd/manifests.go similarity index 100% rename from operator/pkg/controlplane/etcd/mainfests.go rename to operator/pkg/controlplane/etcd/manifests.go diff --git a/operator/pkg/controlplane/etcd/util.go b/operator/pkg/controlplane/etcd/util.go new file mode 100644 index 000000000000..32b43f642ec8 --- /dev/null +++ b/operator/pkg/controlplane/etcd/util.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package etcd + +import ( + "fmt" + "strconv" + "strings" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +// ConfigureClientCredentials configures etcd client credentials for Karmada core and aggregated API servers +func ConfigureClientCredentials(apiServerDeployment *appsv1.Deployment, etcdCfg *operatorv1alpha1.Etcd, name, namespace string) error { + etcdClientServiceName := util.KarmadaEtcdClientName(name) + etcdCertSecretName := util.EtcdCertSecretName(name) + if etcdCfg.External == nil { + etcdClientCredentialsArgs := []string{ + fmt.Sprintf("--etcd-cafile=%s/%s.crt", constants.EtcdClientCredentialsMountPath, constants.EtcdCaCertAndKeyName), + fmt.Sprintf("--etcd-certfile=%s/%s.crt", constants.EtcdClientCredentialsMountPath, constants.EtcdClientCertAndKeyName), + fmt.Sprintf("--etcd-keyfile=%s/%s.key", constants.EtcdClientCredentialsMountPath, constants.EtcdClientCertAndKeyName), + fmt.Sprintf("--etcd-servers=https://%s.%s.svc.cluster.local:%s", etcdClientServiceName, namespace, strconv.Itoa(constants.EtcdListenClientPort)), + } + apiServerDeployment.Spec.Template.Spec.Containers[0].Command = append(apiServerDeployment.Spec.Template.Spec.Containers[0].Command, etcdClientCredentialsArgs...) 
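+		// The file paths referenced by the flags above are provided by the
+		// etcd certificate secret volume mounted read-only below.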
+ + etcdClientCredentialsVolumeMount := corev1.VolumeMount{ + Name: constants.EtcdClientCredentialsVolumeName, + MountPath: constants.EtcdClientCredentialsMountPath, + ReadOnly: true, + } + apiServerDeployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(apiServerDeployment.Spec.Template.Spec.Containers[0].VolumeMounts, etcdClientCredentialsVolumeMount) + + etcdClientCredentialsVolume := corev1.Volume{ + Name: constants.EtcdClientCredentialsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: etcdCertSecretName, + }, + }, + } + apiServerDeployment.Spec.Template.Spec.Volumes = append(apiServerDeployment.Spec.Template.Spec.Volumes, etcdClientCredentialsVolume) + } else { + etcdServers := strings.Join(etcdCfg.External.Endpoints, ",") + etcdClientCredentialsArgs := []string{ + fmt.Sprintf("--etcd-cafile=%s/%s", constants.EtcdClientCredentialsMountPath, constants.CaCertDataKey), + fmt.Sprintf("--etcd-certfile=%s/%s", constants.EtcdClientCredentialsMountPath, constants.TLSCertDataKey), + fmt.Sprintf("--etcd-keyfile=%s/%s", constants.EtcdClientCredentialsMountPath, constants.TLSPrivateKeyDataKey), + fmt.Sprintf("--etcd-servers=%s", etcdServers), + } + apiServerDeployment.Spec.Template.Spec.Containers[0].Command = append(apiServerDeployment.Spec.Template.Spec.Containers[0].Command, etcdClientCredentialsArgs...) + + etcdClientCredentialsVolumeMount := corev1.VolumeMount{ + Name: constants.EtcdClientCredentialsVolumeName, + MountPath: constants.EtcdClientCredentialsMountPath, + ReadOnly: true, + } + apiServerDeployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(apiServerDeployment.Spec.Template.Spec.Containers[0].VolumeMounts, etcdClientCredentialsVolumeMount) + + etcdClientCredentialsVolume := corev1.Volume{ + Name: constants.EtcdClientCredentialsVolumeName, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: etcdCfg.External.SecretRef.Name, + }, + }, + } + apiServerDeployment.Spec.Template.Spec.Volumes = append(apiServerDeployment.Spec.Template.Spec.Volumes, etcdClientCredentialsVolume) + } + return nil +} diff --git a/operator/pkg/controlplane/manifests.go b/operator/pkg/controlplane/manifests.go index fc67c4d2fa96..07b49a8a2434 100644 --- a/operator/pkg/controlplane/manifests.go +++ b/operator/pkg/controlplane/manifests.go @@ -128,11 +128,11 @@ spec: command: - /bin/karmada-controller-manager - --kubeconfig=/etc/karmada/kubeconfig - - --bind-address=0.0.0.0 + - --metrics-bind-address=:8080 - --cluster-status-update-frequency=10s - - --secure-port=10357 - --failover-eviction-timeout=30s - --leader-elect-resource-namespace={{ .SystemNamespace }} + - --health-probe-bind-address=0.0.0.0:10357 - --v=4 livenessProbe: httpGet: @@ -143,6 +143,10 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: kubeconfig subPath: kubeconfig @@ -184,8 +188,8 @@ spec: command: - /bin/karmada-scheduler - --kubeconfig=/etc/karmada/kubeconfig - - --bind-address=0.0.0.0 - - --secure-port=10351 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 - --enable-scheduler-estimator=true - --leader-elect-resource-namespace={{ .SystemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt @@ -201,6 +205,10 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: karmada-certs 
mountPath: /etc/karmada/pki @@ -248,7 +256,8 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/karmada/kubeconfig - - --bind-address=0.0.0.0 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ .SystemNamespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -263,6 +272,10 @@ spec: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 + ports: + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: karmada-certs mountPath: /etc/karmada/pki diff --git a/operator/pkg/controlplane/metricsadapter/mainfests.go b/operator/pkg/controlplane/metricsadapter/manifests.go similarity index 100% rename from operator/pkg/controlplane/metricsadapter/mainfests.go rename to operator/pkg/controlplane/metricsadapter/manifests.go diff --git a/operator/pkg/controlplane/metricsadapter/metricsadapter_test.go b/operator/pkg/controlplane/metricsadapter/metricsadapter_test.go new file mode 100644 index 000000000000..cbfd4a9d3629 --- /dev/null +++ b/operator/pkg/controlplane/metricsadapter/metricsadapter_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metricsadapter + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureKarmadaMetricAdapter(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "docker.io/karmada/karmada-metrics-adapter", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + + cfg := &operatorv1alpha1.KarmadaMetricsAdapter{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + } + + // Create fake clientset. 
+	fakeClient := fakeclientset.NewSimpleClientset()
+
+	err := EnsureKarmadaMetricAdapter(fakeClient, cfg, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to ensure karmada metrics adapter: %v", err)
+	}
+
+	actions := fakeClient.Actions()
+	if len(actions) != 2 {
+		t.Fatalf("expected 2 actions, but got %d", len(actions))
+	}
+}
+
+func TestInstallKarmadaMetricAdapter(t *testing.T) {
+	var replicas int32 = 2
+	image, imageTag := "docker.io/karmada/karmada-metrics-adapter", "latest"
+	name := "karmada-demo"
+	namespace := "test"
+	imagePullPolicy := corev1.PullIfNotPresent
+	annotations := map[string]string{"annotationKey": "annotationValue"}
+	labels := map[string]string{"labelKey": "labelValue"}
+
+	cfg := &operatorv1alpha1.KarmadaMetricsAdapter{
+		CommonSettings: operatorv1alpha1.CommonSettings{
+			Image: operatorv1alpha1.Image{
+				ImageRepository: image,
+				ImageTag:        imageTag,
+			},
+			Replicas:        ptr.To[int32](replicas),
+			Annotations:     annotations,
+			Labels:          labels,
+			Resources:       corev1.ResourceRequirements{},
+			ImagePullPolicy: imagePullPolicy,
+		},
+	}
+
+	// Create fake clientset.
+	fakeClient := fakeclientset.NewSimpleClientset()
+
+	err := installKarmadaMetricAdapter(fakeClient, cfg, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to install karmada metrics adapter: %v", err)
+	}
+
+	err = verifyDeploymentCreation(
+		fakeClient, replicas, imagePullPolicy, name, namespace, image, imageTag,
+	)
+	if err != nil {
+		t.Fatalf("failed to verify deployment creation: %v", err)
+	}
+}
+
+func TestCreateKarmadaMetricAdapterService(t *testing.T) {
+	// Define inputs.
+	name := "karmada-demo"
+	namespace := "test"
+
+	// Initialize fake clientset.
+	client := fakeclientset.NewSimpleClientset()
+
+	err := createKarmadaMetricAdapterService(client, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to create karmada metrics adapter service: %v", err)
+	}
+
+	// Ensure the expected action (service creation) occurred.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d actions", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (Service).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "services" {
+		t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created service object.
+	service := createAction.GetObject().(*corev1.Service)
+	expectedServiceName := util.KarmadaMetricsAdapterName(name)
+	if service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name)
+	}
+
+	if service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+	}
+}
+
+// verifyDeploymentCreation asserts that exactly one Deployment creation was recorded and validates it.
+func verifyDeploymentCreation(client *fakeclientset.Clientset, replicas int32, imagePullPolicy corev1.PullPolicy, name, namespace, image, imageTag string) error {
+	// Assert that a Deployment was created.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		return fmt.Errorf("expected exactly 1 action (create or update), but got %d actions", len(actions))
+	}
+
+	// Check that the action was a Deployment creation.
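+	// A CreateAction carries the object that was submitted to the fake
+	// clientset, which is extracted and validated below.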
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		return fmt.Errorf("expected a CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "deployments" {
+		return fmt.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	deployment := createAction.GetObject().(*appsv1.Deployment)
+	return verifyDeploymentDetails(
+		deployment, replicas, imagePullPolicy, name, namespace, image, imageTag,
+	)
+}
+
+// verifyDeploymentDetails validates the details of a Deployment against the expected parameters.
+func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, name, namespace, image, imageTag string) error {
+	expectedDeploymentName := util.KarmadaMetricsAdapterName(name)
+	if deployment.Name != expectedDeploymentName {
+		return fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name)
+	}
+
+	if deployment.Namespace != namespace {
+		return fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace)
+	}
+
+	if _, exists := deployment.Annotations["annotationKey"]; !exists {
+		return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing")
+	}
+
+	if _, exists := deployment.Labels["labelKey"]; !exists {
+		return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing")
+	}
+
+	if deployment.Spec.Replicas == nil {
+		return fmt.Errorf("expected replicas to be %d, but got nil", replicas)
+	}
+	if *deployment.Spec.Replicas != replicas {
+		return fmt.Errorf("expected replicas to be %d, but got %d", replicas, *deployment.Spec.Replicas)
+	}
+
+	containers := deployment.Spec.Template.Spec.Containers
+	if len(containers) != 1 {
+		return fmt.Errorf("expected exactly 1 container, but got %d", len(containers))
+	}
+	container := containers[0]
+
+	expectedImage := fmt.Sprintf("%s:%s", image, imageTag)
+	if container.Image != expectedImage {
+		return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image)
+	}
+
+	if container.ImagePullPolicy != imagePullPolicy {
+		return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy)
+	}
+
+	var extractedSecrets []string
+	for _, volume := range deployment.Spec.Template.Spec.Volumes {
+		extractedSecrets = append(extractedSecrets, volume.Secret.SecretName)
+	}
+	expectedSecrets := []string{
+		util.AdminKubeconfigSecretName(name),
+		util.KarmadaCertSecretName(name),
+	}
+	for _, expectedSecret := range expectedSecrets {
+		if !contains(extractedSecrets, expectedSecret) {
+			return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret)
+		}
+	}
+
+	return nil
+}
+
+// contains checks whether a slice contains a specific string.
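+// On Go 1.21 and later, slices.Contains from the standard library could be
+// used instead of this helper.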
+func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/controlplane/search/mainfests.go b/operator/pkg/controlplane/search/manifests.go similarity index 90% rename from operator/pkg/controlplane/search/mainfests.go rename to operator/pkg/controlplane/search/manifests.go index 3cf9daefcd83..2990ba1fae99 100644 --- a/operator/pkg/controlplane/search/mainfests.go +++ b/operator/pkg/controlplane/search/manifests.go @@ -57,10 +57,6 @@ spec: - --kubeconfig=/etc/kubeconfig - --authentication-kubeconfig=/etc/kubeconfig - --authorization-kubeconfig=/etc/kubeconfig - - --etcd-servers=https://{{ .EtcdClientService }}.{{ .Namespace }}.svc.cluster.local:{{ .EtcdListenClientPort }} - - --etcd-cafile=/etc/karmada/pki/etcd-ca.crt - - --etcd-certfile=/etc/karmada/pki/etcd-client.crt - - --etcd-keyfile=/etc/karmada/pki/etcd-client.key - --tls-cert-file=/etc/karmada/pki/karmada.crt - --tls-private-key-file=/etc/karmada/pki/karmada.key - --tls-min-version=VersionTLS13 diff --git a/operator/pkg/controlplane/search/search.go b/operator/pkg/controlplane/search/search.go index 590d90d6f839..e7251d0c8e4e 100644 --- a/operator/pkg/controlplane/search/search.go +++ b/operator/pkg/controlplane/search/search.go @@ -26,37 +26,34 @@ import ( clientsetscheme "k8s.io/client-go/kubernetes/scheme" operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" - "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/controlplane/etcd" "github.com/karmada-io/karmada/operator/pkg/util" "github.com/karmada-io/karmada/operator/pkg/util/apiclient" "github.com/karmada-io/karmada/operator/pkg/util/patcher" ) // EnsureKarmadaSearch creates karmada search deployment and service resource. 
-func EnsureKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.KarmadaSearch, name, namespace string, featureGates map[string]bool) error { - if err := installKarmadaSearch(client, cfg, name, namespace, featureGates); err != nil { +func EnsureKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.KarmadaSearch, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, featureGates map[string]bool) error { + if err := installKarmadaSearch(client, cfg, etcdCfg, name, namespace, featureGates); err != nil { return err } return createKarmadaSearchService(client, name, namespace) } -func installKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.KarmadaSearch, name, namespace string, _ map[string]bool) error { +func installKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.KarmadaSearch, etcdCfg *operatorv1alpha1.Etcd, name, namespace string, _ map[string]bool) error { searchDeploymentSetBytes, err := util.ParseTemplate(KarmadaSearchDeployment, struct { DeploymentName, Namespace, Image, ImagePullPolicy, KarmadaCertsSecret string - KubeconfigSecret, EtcdClientService string + KubeconfigSecret string Replicas *int32 - EtcdListenClientPort int32 }{ - DeploymentName: util.KarmadaSearchName(name), - Namespace: namespace, - Image: cfg.Image.Name(), - ImagePullPolicy: string(cfg.ImagePullPolicy), - KarmadaCertsSecret: util.KarmadaCertSecretName(name), - Replicas: cfg.Replicas, - KubeconfigSecret: util.AdminKubeconfigSecretName(name), - EtcdClientService: util.KarmadaEtcdClientName(name), - EtcdListenClientPort: constants.EtcdListenClientPort, + DeploymentName: util.KarmadaSearchName(name), + Namespace: namespace, + Image: cfg.Image.Name(), + ImagePullPolicy: string(cfg.ImagePullPolicy), + KarmadaCertsSecret: util.KarmadaCertSecretName(name), + Replicas: cfg.Replicas, + KubeconfigSecret: util.AdminKubeconfigSecretName(name), }) if err != nil { return fmt.Errorf("error when parsing KarmadaSearch Deployment template: %w", err) @@ -67,6 +64,11 @@ func installKarmadaSearch(client clientset.Interface, cfg *operatorv1alpha1.Karm return fmt.Errorf("err when decoding KarmadaSearch Deployment: %w", err) } + err = etcd.ConfigureClientCredentials(searchDeployment, etcdCfg, name, namespace) + if err != nil { + return err + } + patcher.NewPatcher().WithAnnotations(cfg.Annotations).WithLabels(cfg.Labels). WithExtraArgs(cfg.ExtraArgs).WithResources(cfg.Resources).ForDeployment(searchDeployment) diff --git a/operator/pkg/controlplane/search/search_test.go b/operator/pkg/controlplane/search/search_test.go new file mode 100644 index 000000000000..fb9e8f731baf --- /dev/null +++ b/operator/pkg/controlplane/search/search_test.go @@ -0,0 +1,279 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package search
+
+import (
+	"fmt"
+	"testing"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	coretesting "k8s.io/client-go/testing"
+	"k8s.io/utils/ptr"
+
+	operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
+	"github.com/karmada-io/karmada/operator/pkg/constants"
+	"github.com/karmada-io/karmada/operator/pkg/util"
+)
+
+func TestEnsureKarmadaSearch(t *testing.T) {
+	var replicas int32 = 2
+	image, imageTag := "docker.io/karmada/karmada-search", "latest"
+	name := "karmada-demo"
+	namespace := "test"
+	imagePullPolicy := corev1.PullIfNotPresent
+	annotations := map[string]string{"annotationKey": "annotationValue"}
+	labels := map[string]string{"labelKey": "labelValue"}
+	extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"}
+
+	cfg := &operatorv1alpha1.KarmadaSearch{
+		CommonSettings: operatorv1alpha1.CommonSettings{
+			Image: operatorv1alpha1.Image{
+				ImageRepository: image,
+				ImageTag:        imageTag,
+			},
+			Replicas:        ptr.To[int32](replicas),
+			Annotations:     annotations,
+			Labels:          labels,
+			Resources:       corev1.ResourceRequirements{},
+			ImagePullPolicy: imagePullPolicy,
+		},
+		ExtraArgs: extraArgs,
+	}
+
+	// Create fake clientset.
+	fakeClient := fakeclientset.NewSimpleClientset()
+	etcdCfg := &operatorv1alpha1.Etcd{
+		Local: &operatorv1alpha1.LocalEtcd{},
+	}
+	err := EnsureKarmadaSearch(fakeClient, cfg, etcdCfg, name, namespace, map[string]bool{})
+	if err != nil {
+		t.Fatalf("failed to ensure karmada search: %v", err)
+	}
+
+	actions := fakeClient.Actions()
+	if len(actions) != 2 {
+		t.Fatalf("expected 2 actions, but got %d", len(actions))
+	}
+}
+
+func TestInstallKarmadaSearch(t *testing.T) {
+	var replicas int32 = 2
+	image, imageTag := "docker.io/karmada/karmada-search", "latest"
+	name := "karmada-demo"
+	namespace := "test"
+	imagePullPolicy := corev1.PullIfNotPresent
+	annotations := map[string]string{"annotationKey": "annotationValue"}
+	labels := map[string]string{"labelKey": "labelValue"}
+	extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"}
+
+	cfg := &operatorv1alpha1.KarmadaSearch{
+		CommonSettings: operatorv1alpha1.CommonSettings{
+			Image: operatorv1alpha1.Image{
+				ImageRepository: image,
+				ImageTag:        imageTag,
+			},
+			Replicas:        ptr.To[int32](replicas),
+			Annotations:     annotations,
+			Labels:          labels,
+			Resources:       corev1.ResourceRequirements{},
+			ImagePullPolicy: imagePullPolicy,
+		},
+		ExtraArgs: extraArgs,
+	}
+
+	// Create fake clientset.
+	fakeClient := fakeclientset.NewSimpleClientset()
+	etcdCfg := &operatorv1alpha1.Etcd{
+		Local: &operatorv1alpha1.LocalEtcd{},
+	}
+	err := installKarmadaSearch(fakeClient, cfg, etcdCfg, name, namespace, map[string]bool{})
+	if err != nil {
+		t.Fatalf("failed to install karmada search: %v", err)
+	}
+
+	err = verifyDeploymentCreation(fakeClient, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag)
+	if err != nil {
+		t.Fatalf("failed to verify karmada search deployment creation: %v", err)
+	}
+}
+
+func TestCreateKarmadaSearchService(t *testing.T) {
+	// Define inputs.
+	name := "karmada-demo"
+	namespace := "test"
+
+	// Initialize fake clientset.
+	client := fakeclientset.NewSimpleClientset()
+
+	err := createKarmadaSearchService(client, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to create karmada search service: %v", err)
+	}
+
+	// Ensure the expected action (service creation) occurred.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d actions", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (Service).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "services" {
+		t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created service object.
+	service := createAction.GetObject().(*corev1.Service)
+	expectedServiceName := util.KarmadaSearchName(name)
+	if service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name)
+	}
+
+	if service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+	}
+}
+
+// verifyDeploymentCreation asserts that exactly one Deployment creation was recorded and validates it.
+func verifyDeploymentCreation(client *fakeclientset.Clientset, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error {
+	// Assert that a Deployment was created.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		return fmt.Errorf("expected exactly 1 action (create or update), but got %d actions", len(actions))
+	}
+
+	// Check that the action was a Deployment creation.
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		return fmt.Errorf("expected a CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "deployments" {
+		return fmt.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	deployment := createAction.GetObject().(*appsv1.Deployment)
+	return verifyDeploymentDetails(deployment, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag)
+}
+
+// verifyDeploymentDetails validates the details of a Deployment against the expected parameters.
+func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error {
+	expectedDeploymentName := util.KarmadaSearchName(name)
+	if deployment.Name != expectedDeploymentName {
+		return fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name)
+	}
+
+	if deployment.Namespace != namespace {
+		return fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace)
+	}
+
+	if _, exists := deployment.Annotations["annotationKey"]; !exists {
+		return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing")
+	}
+
+	if _, exists := deployment.Labels["labelKey"]; !exists {
+		return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing")
+	}
+
+	if deployment.Spec.Replicas == nil {
+		return fmt.Errorf("expected replicas to be %d, but got nil", replicas)
+	}
+	if *deployment.Spec.Replicas != replicas {
+		return fmt.Errorf("expected replicas to be %d, but got %d", replicas, *deployment.Spec.Replicas)
+	}
+
+	containers := deployment.Spec.Template.Spec.Containers
+	if len(containers) != 1 {
+		return fmt.Errorf("expected exactly 1 container, but got %d", len(containers))
+	}
+
+	expectedImage := fmt.Sprintf("%s:%s", image, imageTag)
+	container := containers[0]
+	if container.Image != expectedImage {
+		return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image)
+	}
+
+	if container.ImagePullPolicy != imagePullPolicy {
+		return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy)
+	}
+
+	err := verifyExtraArgs(&container, extraArgs)
+	if err != nil {
+		return fmt.Errorf("failed to verify extra args: %v", err)
+	}
+
+	err = verifySecrets(deployment, name)
+	if err != nil {
+		return fmt.Errorf("failed to verify secrets: %v", err)
+	}
+
+	err = verifyEtcdServers(&container, name, namespace)
+	if err != nil {
+		return fmt.Errorf("failed to verify etcd servers: %v", err)
+	}
+
+	return nil
+}
+
+// verifySecrets validates that the expected secrets are present in the Deployment's volumes.
+func verifySecrets(deployment *appsv1.Deployment, name string) error {
+	var extractedSecrets []string
+	for _, volume := range deployment.Spec.Template.Spec.Volumes {
+		extractedSecrets = append(extractedSecrets, volume.Secret.SecretName)
+	}
+	expectedSecrets := []string{
+		util.AdminKubeconfigSecretName(name),
+		util.KarmadaCertSecretName(name),
+	}
+	for _, expectedSecret := range expectedSecrets {
+		if !contains(extractedSecrets, expectedSecret) {
+			return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret)
+		}
+	}
+	return nil
+}
+
+// verifyEtcdServers checks that the container command includes the correct etcd server argument.
+func verifyEtcdServers(container *corev1.Container, name, namespace string) error {
+	etcdServersArg := fmt.Sprintf("https://%s.%s.svc.cluster.local:%d,", util.KarmadaEtcdClientName(name), namespace, constants.EtcdListenClientPort)
+	etcdServersArg = fmt.Sprintf("--etcd-servers=%s", etcdServersArg[:len(etcdServersArg)-1])
+	if !contains(container.Command, etcdServersArg) {
+		return fmt.Errorf("etcd servers argument '%s' not found in container command", etcdServersArg)
+	}
+	return nil
+}
+
+// verifyExtraArgs checks that the container command includes the extra arguments.
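+// Extra args are expected to be rendered by the patcher as --key=value flags
+// on the container command line.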
+func verifyExtraArgs(container *corev1.Container, extraArgs map[string]string) error { + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + return nil +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/controlplane/webhook/mainfests.go b/operator/pkg/controlplane/webhook/manifests.go similarity index 95% rename from operator/pkg/controlplane/webhook/mainfests.go rename to operator/pkg/controlplane/webhook/manifests.go index 36de4f176753..4e8b0705c689 100644 --- a/operator/pkg/controlplane/webhook/mainfests.go +++ b/operator/pkg/controlplane/webhook/manifests.go @@ -49,6 +49,7 @@ spec: - /bin/karmada-webhook - --kubeconfig=/etc/karmada/kubeconfig - --bind-address=0.0.0.0 + - --metrics-bind-address=:8080 - --default-not-ready-toleration-seconds=30 - --default-unreachable-toleration-seconds=30 - --secure-port=8443 @@ -56,6 +57,9 @@ spec: - --v=4 ports: - containerPort: 8443 + - containerPort: 8080 + name: metrics + protocol: TCP volumeMounts: - name: kubeconfig subPath: kubeconfig diff --git a/operator/pkg/controlplane/webhook/webhook_test.go b/operator/pkg/controlplane/webhook/webhook_test.go new file mode 100644 index 000000000000..8d009e3780ef --- /dev/null +++ b/operator/pkg/controlplane/webhook/webhook_test.go @@ -0,0 +1,259 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhook + +import ( + "fmt" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + "k8s.io/utils/ptr" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureKarmadaWebhook(t *testing.T) { + var replicas int32 = 2 + image, imageTag := "docker.io/karmada/karmada-webhook", "latest" + name := "karmada-demo" + namespace := "test" + imagePullPolicy := corev1.PullIfNotPresent + annotations := map[string]string{"annotationKey": "annotationValue"} + labels := map[string]string{"labelKey": "labelValue"} + extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"} + + cfg := &operatorv1alpha1.KarmadaWebhook{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ + ImageRepository: image, + ImageTag: imageTag, + }, + Replicas: ptr.To[int32](replicas), + Annotations: annotations, + Labels: labels, + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: imagePullPolicy, + }, + ExtraArgs: extraArgs, + } + + // Create fake clientset. 
+	fakeClient := fakeclientset.NewSimpleClientset()
+
+	err := EnsureKarmadaWebhook(fakeClient, cfg, name, namespace, map[string]bool{})
+	if err != nil {
+		t.Fatalf("failed to ensure karmada webhook: %v", err)
+	}
+
+	actions := fakeClient.Actions()
+	if len(actions) != 2 {
+		t.Fatalf("expected 2 actions, but got %d", len(actions))
+	}
+}
+
+func TestInstallKarmadaWebhook(t *testing.T) {
+	var replicas int32 = 2
+	image, imageTag := "docker.io/karmada/karmada-webhook", "latest"
+	name := "karmada-demo"
+	namespace := "test"
+	imagePullPolicy := corev1.PullIfNotPresent
+	annotations := map[string]string{"annotationKey": "annotationValue"}
+	labels := map[string]string{"labelKey": "labelValue"}
+	extraArgs := map[string]string{"cmd1": "arg1", "cmd2": "arg2"}
+
+	cfg := &operatorv1alpha1.KarmadaWebhook{
+		CommonSettings: operatorv1alpha1.CommonSettings{
+			Image: operatorv1alpha1.Image{
+				ImageRepository: image,
+				ImageTag:        imageTag,
+			},
+			Replicas:        ptr.To[int32](replicas),
+			Annotations:     annotations,
+			Labels:          labels,
+			Resources:       corev1.ResourceRequirements{},
+			ImagePullPolicy: imagePullPolicy,
+		},
+		ExtraArgs: extraArgs,
+	}
+
+	// Create fake clientset.
+	fakeClient := fakeclientset.NewSimpleClientset()
+
+	err := installKarmadaWebhook(fakeClient, cfg, name, namespace, map[string]bool{})
+	if err != nil {
+		t.Fatalf("failed to install karmada webhook: %v", err)
+	}
+
+	err = verifyDeploymentCreation(fakeClient, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag)
+	if err != nil {
+		t.Fatalf("failed to verify karmada webhook deployment creation: %v", err)
+	}
+}
+
+func TestCreateKarmadaWebhookService(t *testing.T) {
+	// Define inputs.
+	name := "karmada-demo"
+	namespace := "test"
+
+	// Initialize fake clientset.
+	client := fakeclientset.NewSimpleClientset()
+
+	err := createKarmadaWebhookService(client, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to create karmada webhook service: %v", err)
+	}
+
+	// Ensure the expected action (service creation) occurred.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d actions", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (Service).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "services" {
+		t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created service object.
+	service := createAction.GetObject().(*corev1.Service)
+	expectedServiceName := util.KarmadaWebhookName(name)
+	if service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name)
+	}
+
+	if service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+	}
+}
+
+// verifyDeploymentCreation asserts that exactly one Deployment creation was recorded and validates it.
+func verifyDeploymentCreation(client *fakeclientset.Clientset, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error {
+	// Assert that a Deployment was created.
+	actions := client.Actions()
+	if len(actions) != 1 {
+		return fmt.Errorf("expected exactly 1 action (create or update), but got %d actions", len(actions))
+	}
+
+	// Check that the action was a Deployment creation.
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		return fmt.Errorf("expected a CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "deployments" {
+		return fmt.Errorf("expected action on 'deployments', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	deployment := createAction.GetObject().(*appsv1.Deployment)
+	return verifyDeploymentDetails(deployment, replicas, imagePullPolicy, extraArgs, name, namespace, image, imageTag)
+}
+
+// verifyDeploymentDetails validates the details of a Deployment against the expected parameters.
+func verifyDeploymentDetails(deployment *appsv1.Deployment, replicas int32, imagePullPolicy corev1.PullPolicy, extraArgs map[string]string, name, namespace, image, imageTag string) error {
+	expectedDeploymentName := util.KarmadaWebhookName(name)
+	if deployment.Name != expectedDeploymentName {
+		return fmt.Errorf("expected deployment name '%s', but got '%s'", expectedDeploymentName, deployment.Name)
+	}
+
+	if deployment.Namespace != namespace {
+		return fmt.Errorf("expected deployment namespace '%s', but got '%s'", namespace, deployment.Namespace)
+	}
+
+	if _, exists := deployment.Annotations["annotationKey"]; !exists {
+		return fmt.Errorf("expected annotation with key 'annotationKey' and value 'annotationValue', but it was missing")
+	}
+
+	if _, exists := deployment.Labels["labelKey"]; !exists {
+		return fmt.Errorf("expected label with key 'labelKey' and value 'labelValue', but it was missing")
+	}
+
+	if deployment.Spec.Replicas == nil {
+		return fmt.Errorf("expected replicas to be %d, but got nil", replicas)
+	}
+	if *deployment.Spec.Replicas != replicas {
+		return fmt.Errorf("expected replicas to be %d, but got %d", replicas, *deployment.Spec.Replicas)
+	}
+
+	containers := deployment.Spec.Template.Spec.Containers
+	if len(containers) != 1 {
+		return fmt.Errorf("expected exactly 1 container, but got %d", len(containers))
+	}
+
+	expectedImage := fmt.Sprintf("%s:%s", image, imageTag)
+	container := containers[0]
+	if container.Image != expectedImage {
+		return fmt.Errorf("expected container image '%s', but got '%s'", expectedImage, container.Image)
+	}
+
+	if container.ImagePullPolicy != imagePullPolicy {
+		return fmt.Errorf("expected image pull policy '%s', but got '%s'", imagePullPolicy, container.ImagePullPolicy)
+	}
+
+	err := verifyExtraArgs(&container, extraArgs)
+	if err != nil {
+		return fmt.Errorf("failed to verify extra args: %v", err)
+	}
+
+	err = verifySecrets(deployment, name)
+	if err != nil {
+		return fmt.Errorf("failed to verify secrets: %v", err)
+	}
+
+	return nil
+}
+
+// verifySecrets validates that the expected secrets are present in the Deployment's volumes.
+func verifySecrets(deployment *appsv1.Deployment, name string) error {
+	var extractedSecrets []string
+	for _, volume := range deployment.Spec.Template.Spec.Volumes {
+		extractedSecrets = append(extractedSecrets, volume.Secret.SecretName)
+	}
+	expectedSecrets := []string{
+		util.AdminKubeconfigSecretName(name),
+		util.WebhookCertSecretName(name),
+	}
+	for _, expectedSecret := range expectedSecrets {
+		if !contains(extractedSecrets, expectedSecret) {
+			return fmt.Errorf("expected secret '%s' not found in extracted secrets", expectedSecret)
+		}
+	}
+	return nil
+}
+
+// verifyExtraArgs checks that the container command includes the extra arguments.
+func verifyExtraArgs(container *corev1.Container, extraArgs map[string]string) error { + for key, value := range extraArgs { + expectedArg := fmt.Sprintf("--%s=%s", key, value) + if !contains(container.Command, expectedArg) { + return fmt.Errorf("expected container commands to include '%s', but it was missing", expectedArg) + } + } + return nil +} + +// contains check if a slice contains a specific string. +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} diff --git a/operator/pkg/deinit.go b/operator/pkg/deinit.go index e2fd08d658e8..fbb6eb4ccec6 100644 --- a/operator/pkg/deinit.go +++ b/operator/pkg/deinit.go @@ -36,6 +36,7 @@ type DeInitOptions struct { Namespace string Kubeconfig *rest.Config HostCluster *operatorv1alpha1.HostCluster + Karmada *operatorv1alpha1.Karmada } // DeInitOpt defines a type of function to set DeInitOptions values. @@ -56,8 +57,8 @@ type deInitData struct { func NewDeInitDataJob(opt *DeInitOptions) *workflow.Job { deInitJob := workflow.NewJob() - deInitJob.AppendTask(tasks.NewRemoveComponentTask()) - deInitJob.AppendTask(tasks.NewCleanupCertTask()) + deInitJob.AppendTask(tasks.NewRemoveComponentTask(opt.Karmada)) + deInitJob.AppendTask(tasks.NewCleanupCertTask(opt.Karmada)) deInitJob.AppendTask(tasks.NewCleanupKubeconfigTask()) deInitJob.SetDataInitializer(func() (workflow.RunData, error) { @@ -126,6 +127,7 @@ func defaultJobDeInitOptions() *DeInitOptions { // NewDeInitOptWithKarmada returns a DeInitOpt function to initialize DeInitOptions with karmada resource func NewDeInitOptWithKarmada(karmada *operatorv1alpha1.Karmada) DeInitOpt { return func(o *DeInitOptions) { + o.Karmada = karmada o.Name = karmada.GetName() o.Namespace = karmada.GetNamespace() diff --git a/operator/pkg/init.go b/operator/pkg/init.go index 31a0bce99cdb..3c91c69b66fc 100644 --- a/operator/pkg/init.go +++ b/operator/pkg/init.go @@ -119,7 +119,13 @@ func NewInitJob(opt *InitOptions) *workflow.Job { initJob.AppendTask(tasks.NewCertTask()) initJob.AppendTask(tasks.NewNamespaceTask()) initJob.AppendTask(tasks.NewUploadCertsTask()) - initJob.AppendTask(tasks.NewEtcdTask()) + + etcdConfig := opt.Karmada.Spec.Components.Etcd + // Only required if local etcd is configured + if etcdConfig.Local != nil { + initJob.AppendTask(tasks.NewEtcdTask()) + } + initJob.AppendTask(tasks.NewKarmadaApiserverTask()) initJob.AppendTask(tasks.NewUploadKubeconfigTask()) initJob.AppendTask(tasks.NewKarmadaAggregatedApiserverTask()) diff --git a/operator/pkg/karmadaresource/apiservice/apiservice.go b/operator/pkg/karmadaresource/apiservice/apiservice.go index 4eadfa731784..d0b9bb1ac808 100644 --- a/operator/pkg/karmadaresource/apiservice/apiservice.go +++ b/operator/pkg/karmadaresource/apiservice/apiservice.go @@ -46,7 +46,7 @@ func init() { } // EnsureAggregatedAPIService creates aggregated APIService and a service -func EnsureAggregatedAPIService(aggregatorClient *aggregator.Clientset, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { +func EnsureAggregatedAPIService(aggregatorClient aggregator.Interface, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { if err := aggregatedApiserverService(client, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace); err != 
nil { return err } @@ -54,7 +54,7 @@ func EnsureAggregatedAPIService(aggregatorClient *aggregator.Clientset, client c return aggregatedAPIService(aggregatorClient, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle) } -func aggregatedAPIService(client *aggregator.Clientset, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle string) error { +func aggregatedAPIService(client aggregator.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle string) error { apiServiceBytes, err := util.ParseTemplate(KarmadaAggregatedAPIService, struct { Namespace string ServiceName string @@ -101,7 +101,7 @@ func aggregatedApiserverService(client clientset.Interface, karmadaControlPlaneS } // EnsureMetricsAdapterAPIService creates APIService and a service for karmada-metrics-adapter -func EnsureMetricsAdapterAPIService(aggregatorClient *aggregator.Clientset, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { +func EnsureMetricsAdapterAPIService(aggregatorClient aggregator.Interface, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { if err := karmadaMetricsAdapterService(client, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace); err != nil { return err } @@ -109,7 +109,7 @@ func EnsureMetricsAdapterAPIService(aggregatorClient *aggregator.Clientset, clie return karmadaMetricsAdapterAPIService(aggregatorClient, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle) } -func karmadaMetricsAdapterAPIService(client *aggregator.Clientset, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle string) error { +func karmadaMetricsAdapterAPIService(client aggregator.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle string) error { for _, gv := range constants.KarmadaMetricsAdapterAPIServices { // The APIService name to metrics adapter is "$version.$group" apiServiceName := fmt.Sprintf("%s.%s", gv.Version, gv.Group) @@ -171,7 +171,7 @@ func karmadaMetricsAdapterService(client clientset.Interface, karmadaControlPlan } // EnsureSearchAPIService creates APIService and a service for karmada-metrics-adapter -func EnsureSearchAPIService(aggregatorClient *aggregator.Clientset, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { +func EnsureSearchAPIService(aggregatorClient aggregator.Interface, client clientset.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace, caBundle string) error { if err := karmadaSearchService(client, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, hostClusterServiceName, hostClusterNamespace); err != nil { return err } @@ -179,7 +179,7 @@ func EnsureSearchAPIService(aggregatorClient *aggregator.Clientset, client clien return karmadaSearchAPIService(aggregatorClient, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle) } -func karmadaSearchAPIService(client *aggregator.Clientset, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle string) error { +func karmadaSearchAPIService(client aggregator.Interface, karmadaControlPlaneServiceName, karmadaControlPlaneNamespace, caBundle 
string) error { apiServiceBytes, err := util.ParseTemplate(KarmadaSearchAPIService, struct { ServiceName, Namespace string CABundle string diff --git a/operator/pkg/karmadaresource/apiservice/apiservice_test.go b/operator/pkg/karmadaresource/apiservice/apiservice_test.go new file mode 100644 index 000000000000..7a9273b9f9dc --- /dev/null +++ b/operator/pkg/karmadaresource/apiservice/apiservice_test.go @@ -0,0 +1,397 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiservice + +import ( + "encoding/base64" + "fmt" + "testing" + + corev1 "k8s.io/api/core/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + fakeAggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" + + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureAggregatedAPIService(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeAggregatorClient := fakeAggregator.NewSimpleClientset() + fakeClient := fakeclientset.NewSimpleClientset() + err := EnsureAggregatedAPIService( + fakeAggregatorClient, fakeClient, name, namespace, name, namespace, caBundle, + ) + if err != nil { + t.Fatalf("failed to ensure aggregated api service: %v", err) + } + + // Ensure the expected action (aggregated api service creation) occurred on the fake aggregator clientset. + actions := fakeAggregatorClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Ensure the expected action (aggregated apiserver service creation) occurred on the fake clientset. + actions = fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } +} + +func TestAggregatedAPIService(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeAggregator.NewSimpleClientset() + err := aggregatedAPIService(fakeClient, name, namespace, caBundle) + if err != nil { + t.Fatalf("failed to create aggregated api service: %v", err) + } + + // Ensure the expected action (service creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (apiregistrationv1.APIService). 
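+	// The fake aggregator clientset records APIService writes just like the
+	// core fake clientset records Service writes.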
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "apiservices" {
+		t.Fatalf("expected action on 'apiservices', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created apiregistrationv1.APIService object.
+	service := createAction.GetObject().(*apiregistrationv1.APIService)
+	expectedServiceName := util.KarmadaAggregatedAPIServerName(name)
+	if service.Spec.Service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Spec.Service.Name)
+	}
+
+	if service.Spec.Service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Spec.Service.Namespace)
+	}
+
+	if string(service.Spec.CABundle) != caTestData {
+		t.Fatalf("expected service caBundle %s, but got %s", caTestData, string(service.Spec.CABundle))
+	}
+}
+
+func TestAggregatedAPIServerService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	fakeClient := fakeclientset.NewSimpleClientset()
+	err := aggregatedApiserverService(fakeClient, name, namespace, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to create aggregated apiserver service: %v", err)
+	}
+
+	// Ensure the expected action (service creation) occurred.
+	actions := fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (Service).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "services" {
+		t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created service object.
+	service := createAction.GetObject().(*corev1.Service)
+	expectedServiceName := util.KarmadaAggregatedAPIServerName(name)
+	if service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name)
+	}
+
+	if service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+	}
+
+	serviceExternalNameExpected := fmt.Sprintf("%s.%s.svc", expectedServiceName, namespace)
+	if service.Spec.ExternalName != serviceExternalNameExpected {
+		t.Fatalf("expected service external name '%s', but got '%s'", serviceExternalNameExpected, service.Spec.ExternalName)
+	}
+}
+
+func TestEnsureMetricsAdapterAPIService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	// Base64 encoding of the dummy certificate data.
+	caTestData := "test-ca-data"
+	caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData))
+
+	fakeAggregatorClient := fakeAggregator.NewSimpleClientset()
+	fakeClient := fakeclientset.NewSimpleClientset()
+	err := EnsureMetricsAdapterAPIService(
+		fakeAggregatorClient, fakeClient, name, namespace, name, namespace, caBundle,
+	)
+	if err != nil {
+		t.Fatalf("failed to ensure metrics adapter api service: %v", err)
+	}
+
+	// Ensure the expected action (metrics adapter api service creation) occurred on the fake aggregator clientset.
+	actions := fakeAggregatorClient.Actions()
+	if len(actions) != len(constants.KarmadaMetricsAdapterAPIServices) {
+		t.Fatalf("expected %d actions, but got %d", len(constants.KarmadaMetricsAdapterAPIServices), len(actions))
+	}
+
+	// Ensure the expected action (metrics adapter service creation) occurred on the fake clientset.
+	actions = fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+}
+
+func TestKarmadaMetricsAdapterAPIService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	// Base64 encoding of the dummy certificate data.
+	caTestData := "test-ca-data"
+	caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData))
+
+	fakeAggregatorClient := fakeAggregator.NewSimpleClientset()
+	err := karmadaMetricsAdapterAPIService(fakeAggregatorClient, name, namespace, caBundle)
+	if err != nil {
+		t.Fatalf("failed to create karmada metrics adapter api service: %v", err)
+	}
+
+	// Ensure the expected actions (apiservice creation) occurred.
+	actions := fakeAggregatorClient.Actions()
+	expectedActionsCount := len(constants.KarmadaMetricsAdapterAPIServices)
+	if len(actions) != expectedActionsCount {
+		t.Fatalf("expected %d actions, but got %d", expectedActionsCount, len(actions))
+	}
+
+	for i, action := range actions {
+		// Validate the action is a CreateAction and it's for the correct resource (APIService).
+		createAction, ok := action.(coretesting.CreateAction)
+		if !ok {
+			t.Fatalf("expected CreateAction, but got %T", action)
+		}
+
+		if createAction.GetResource().Resource != "apiservices" {
+			t.Fatalf("expected action on 'apiservices', but got '%s'", createAction.GetResource().Resource)
+		}
+
+		// Validate the created apiregistrationv1.APIService object.
+		service := createAction.GetObject().(*apiregistrationv1.APIService)
+		karmadaMetricsAdapterGV := constants.KarmadaMetricsAdapterAPIServices[i]
+		apiServiceNameExpected := fmt.Sprintf("%s.%s", karmadaMetricsAdapterGV.Version, karmadaMetricsAdapterGV.Group)
+		if service.Name != apiServiceNameExpected {
+			t.Fatalf("expected APIService name %s, but got %s", apiServiceNameExpected, service.ObjectMeta.Name)
+		}
+
+		if service.Spec.Service.Namespace != namespace {
+			t.Fatalf("expected APIService namespace %s, but got %s", namespace, service.Spec.Service.Namespace)
+		}
+
+		if service.Spec.Group != karmadaMetricsAdapterGV.Group {
+			t.Fatalf("expected APIService group %s, but got %s", karmadaMetricsAdapterGV.Group, service.Spec.Group)
+		}
+
+		if service.Spec.Version != karmadaMetricsAdapterGV.Version {
+			t.Fatalf("expected APIService version %s, but got %s", karmadaMetricsAdapterGV.Version, service.Spec.Version)
+		}
+
+		serviceNameExpected := util.KarmadaMetricsAdapterName(name)
+		if service.Spec.Service.Name != serviceNameExpected {
+			t.Fatalf("expected APIService service name %s, but got %s", serviceNameExpected, service.Spec.Service.Name)
+		}
+
+		if string(service.Spec.CABundle) != caTestData {
+			t.Fatalf("expected service CABundle %s, but got %s", caTestData, string(service.Spec.CABundle))
+		}
+	}
+}
+
+func TestKarmadaMetricsAdapterService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	fakeClient := fakeclientset.NewSimpleClientset()
+	err := karmadaMetricsAdapterService(fakeClient, name, namespace, name, namespace)
+	if err != nil {
+		t.Fatalf("failed to create karmada metrics adapter service: %v", err)
+	}
+
+	// Ensure the expected action (service creation) occurred.
+	actions := fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (Service).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "services" {
+		t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created service object.
+	service := createAction.GetObject().(*corev1.Service)
+	expectedServiceName := util.KarmadaMetricsAdapterName(name)
+	if service.Name != expectedServiceName {
+		t.Fatalf("expected service name '%s', but got '%s'", expectedServiceName, service.Name)
+	}
+
+	if service.Namespace != namespace {
+		t.Fatalf("expected service namespace '%s', but got '%s'", namespace, service.Namespace)
+	}
+
+	serviceExternalNameExpected := fmt.Sprintf("%s.%s.svc", expectedServiceName, namespace)
+	if service.Spec.ExternalName != serviceExternalNameExpected {
+		t.Fatalf("expected service external name '%s', but got '%s'", serviceExternalNameExpected, service.Spec.ExternalName)
+	}
+}
+
+func TestEnsureSearchAPIService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	// Base64 encoding of the dummy certificate data.
+	caTestData := "test-ca-data"
+	caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData))
+
+	fakeAggregatorClient := fakeAggregator.NewSimpleClientset()
+	fakeClient := fakeclientset.NewSimpleClientset()
+	err := EnsureSearchAPIService(
+		fakeAggregatorClient, fakeClient, name, namespace, name, namespace, caBundle,
+	)
+	if err != nil {
+		t.Fatalf("failed to ensure search api service: %v", err)
+	}
+
+	// Ensure the expected action (search api service creation) occurred on the fake aggregator clientset.
+	actions := fakeAggregatorClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Ensure the expected action (search service creation) occurred on the fake clientset.
+	actions = fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+}
+
+func TestKarmadaSearchAPIService(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	// Base64 encoding of the dummy certificate data.
+	caTestData := "test-ca-data"
+	caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData))
+
+	fakeClient := fakeAggregator.NewSimpleClientset()
+	err := karmadaSearchAPIService(fakeClient, name, namespace, caBundle)
+	if err != nil {
+		t.Fatalf("failed to create karmada search api service: %v", err)
+	}
+
+	// Ensure the expected action (apiservice creation) occurred.
+	actions := fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (apiregistrationv1.APIService).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	if createAction.GetResource().Resource != "apiservices" {
+		t.Fatalf("expected action on 'apiservices', but got '%s'", createAction.GetResource().Resource)
+	}
+
+	// Validate the created APIService object.
+ service := createAction.GetObject().(*apiregistrationv1.APIService) + serviceNameExpected := util.KarmadaSearchAPIServerName(name) + if service.Spec.Service.Name != serviceNameExpected { + t.Fatalf("expected APIService name %s, but got %s", serviceNameExpected, service.Spec.Service.Name) + } + + if service.Spec.Service.Namespace != namespace { + t.Fatalf("expected APIService namespace %s, but got %s", namespace, service.Spec.Service.Namespace) + } + + if string(service.Spec.CABundle) != caTestData { + t.Fatalf("expected service CABundle %s, but got %s", caTestData, string(service.Spec.CABundle)) + } +} + +func TestKarmadaSearchService(t *testing.T) { + name, namespace := "karmada-demo", "test" + + fakeClient := fakeclientset.NewSimpleClientset() + err := karmadaSearchService(fakeClient, name, namespace, name, namespace) + if err != nil { + t.Fatalf("failed to create karmada search service: %v", err) + } + + // Ensure the expected action (service creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (Service). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + if createAction.GetResource().Resource != "services" { + t.Fatalf("expected action on 'services', but got '%s'", createAction.GetResource().Resource) + } + + // Validate the created service object. + service := createAction.GetObject().(*corev1.Service) + serviceNameExpected := util.KarmadaSearchName(name) + if service.Name != serviceNameExpected { + t.Fatalf("expected service name '%s', but got '%s'", serviceNameExpected, service.Name) + } + + if service.Namespace != namespace { + t.Fatalf("expected service namespace %s, but got %s", namespace, service.Namespace) + } + + serviceExternalNameExpected := fmt.Sprintf("%s.%s.svc", util.KarmadaSearchName(name), namespace) + if service.Spec.ExternalName != serviceExternalNameExpected { + t.Fatalf("expected service external name '%s', but got '%s'", serviceExternalNameExpected, service.Spec.ExternalName) + } +} diff --git a/operator/pkg/karmadaresource/rbac/manifest.go b/operator/pkg/karmadaresource/rbac/manifest.go index 66eff933c37d..4c25dca6c8fc 100644 --- a/operator/pkg/karmadaresource/rbac/manifest.go +++ b/operator/pkg/karmadaresource/rbac/manifest.go @@ -166,5 +166,33 @@ rules: - deletecollection - patch - update +` + // ClusterProxyAdminClusterRole role to proxy member clusters + ClusterProxyAdminClusterRole = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cluster-proxy-admin +rules: +- apiGroups: + - 'cluster.karmada.io' + resources: + - clusters/proxy + verbs: + - '*' +` + // ClusterProxyAdminClusterRoleBinding authorize system:admin to proxy member clusters + ClusterProxyAdminClusterRoleBinding = ` +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cluster-proxy-admin +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-proxy-admin +subjects: + - kind: User + name: "system:admin" ` ) diff --git a/operator/pkg/karmadaresource/rbac/rbac.go b/operator/pkg/karmadaresource/rbac/rbac.go index 2e82b382e79f..3ca2d18666a7 100644 --- a/operator/pkg/karmadaresource/rbac/rbac.go +++ b/operator/pkg/karmadaresource/rbac/rbac.go @@ -30,25 +30,46 @@ import ( // EnsureKarmadaRBAC create karmada resource view and edit clusterrole func 
diff --git a/operator/pkg/karmadaresource/rbac/rbac.go b/operator/pkg/karmadaresource/rbac/rbac.go
index 2e82b382e79f..3ca2d18666a7 100644
--- a/operator/pkg/karmadaresource/rbac/rbac.go
+++ b/operator/pkg/karmadaresource/rbac/rbac.go
@@ -30,25 +30,46 @@ import (
 
 // EnsureKarmadaRBAC create karmada resource view and edit clusterrole
 func EnsureKarmadaRBAC(client clientset.Interface) error {
-	if err := grantKarmadaResourceViewClusterrole(client); err != nil {
+	if err := grantClusterProxyAdminRBAC(client); err != nil {
 		return err
 	}
-	return grantKarmadaResourceEditClusterrole(client)
+	if err := grantKarmadaResourceViewClusterRole(client); err != nil {
+		return err
+	}
+	return grantKarmadaResourceEditClusterRole(client)
+}
+
+func grantClusterProxyAdminRBAC(client clientset.Interface) error {
+	role := &rbacv1.ClusterRole{}
+	if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(ClusterProxyAdminClusterRole), role); err != nil {
+		return fmt.Errorf("err when decoding ClusterProxyAdmin ClusterRole: %w", err)
+	}
+	util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue)
+	if err := apiclient.CreateOrUpdateClusterRole(client, role); err != nil {
+		return fmt.Errorf("failed to create or update ClusterRole: %w", err)
+	}
+
+	roleBinding := &rbacv1.ClusterRoleBinding{}
+	if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(ClusterProxyAdminClusterRoleBinding), roleBinding); err != nil {
+		return fmt.Errorf("err when decoding ClusterProxyAdmin ClusterRoleBinding: %w", err)
+	}
+	util.MergeLabel(roleBinding, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue)
+	return apiclient.CreateOrUpdateClusterRoleBinding(client, roleBinding)
 }
 
-func grantKarmadaResourceViewClusterrole(client clientset.Interface) error {
+func grantKarmadaResourceViewClusterRole(client clientset.Interface) error {
 	role := &rbacv1.ClusterRole{}
 	if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(KarmadaResourceViewClusterRole), role); err != nil {
-		return fmt.Errorf("err when decoding Karmada view Clusterrole: %w", err)
+		return fmt.Errorf("err when decoding Karmada view ClusterRole: %w", err)
 	}
 	util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue)
 	return apiclient.CreateOrUpdateClusterRole(client, role)
 }
 
-func grantKarmadaResourceEditClusterrole(client clientset.Interface) error {
+func grantKarmadaResourceEditClusterRole(client clientset.Interface) error {
 	role := &rbacv1.ClusterRole{}
 	if err := kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), []byte(KarmadaResourceEditClusterRole), role); err != nil {
-		return fmt.Errorf("err when decoding Karmada edit Clusterrole: %w", err)
+		return fmt.Errorf("err when decoding Karmada edit ClusterRole: %w", err)
 	}
 	util.MergeLabel(role, util.KarmadaSystemLabel, util.KarmadaSystemLabelValue)
 	return apiclient.CreateOrUpdateClusterRole(client, role)
diff --git a/operator/pkg/karmadaresource/rbac/rbac_test.go b/operator/pkg/karmadaresource/rbac/rbac_test.go
new file mode 100644
index 000000000000..997ebecb4d7c
--- /dev/null
+++ b/operator/pkg/karmadaresource/rbac/rbac_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package rbac + +import ( + "testing" + + rbacv1 "k8s.io/api/rbac/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + "github.com/karmada-io/karmada/pkg/util" +) + +func TestEnsureKarmadaRBAC(t *testing.T) { + fakeClient := fakeclientset.NewSimpleClientset() + err := EnsureKarmadaRBAC(fakeClient) + if err != nil { + t.Fatalf("failed to ensure karmada rbac: %v", err) + } + + actions := fakeClient.Actions() + if len(actions) != 4 { + t.Fatalf("expected 4 actions, but got %d", len(actions)) + } +} + +func TestGrantKarmadaResourceEditClusterrole(t *testing.T) { + fakeClient := fakeclientset.NewSimpleClientset() + err := grantKarmadaResourceEditClusterRole(fakeClient) + if err != nil { + t.Fatalf("failed to grant karmada resource edit clusterrole: %v", err) + } + + // Ensure the expected action (clusterrole creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (clusterrole). + createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + resourceExpected := "clusterroles" + if createAction.GetResource().Resource != resourceExpected { + t.Fatalf("expected action on %s, but got %s", resourceExpected, createAction.GetResource().Resource) + } + + clusterRole := createAction.GetObject().(*rbacv1.ClusterRole) + if _, exists := clusterRole.Labels[util.KarmadaSystemLabel]; !exists { + t.Errorf("expected label %s to exist on the clusterrole, but it does not", util.KarmadaSystemLabel) + } + + editClusterRoleNameExpected := "karmada-edit" + if clusterRole.Name != editClusterRoleNameExpected { + t.Errorf("expected edit cluster role name to be %s, but found %s", editClusterRoleNameExpected, clusterRole.Name) + } +} + +func TestGrantKarmadaResourceViewClusterrole(t *testing.T) { + fakeClient := fakeclientset.NewSimpleClientset() + err := grantKarmadaResourceViewClusterRole(fakeClient) + if err != nil { + t.Fatalf("failed to grant karmada resource view clusterrole: %v", err) + } + + // Ensure the expected action (clusterrole creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 1 { + t.Fatalf("expected 1 action, but got %d", len(actions)) + } + + // Validate the action is a CreateAction and it's for the correct resource (clusterrole). 
+ createAction, ok := actions[0].(coretesting.CreateAction) + if !ok { + t.Fatalf("expected CreateAction, but got %T", actions[0]) + } + + resourceExpected := "clusterroles" + if createAction.GetResource().Resource != resourceExpected { + t.Fatalf("expected action on %s, but got %s", resourceExpected, createAction.GetResource().Resource) + } + + clusterRole := createAction.GetObject().(*rbacv1.ClusterRole) + if _, exists := clusterRole.Labels[util.KarmadaSystemLabel]; !exists { + t.Fatalf("expected label %s to exist on the clusterrole, but it does not", util.KarmadaSystemLabel) + } + + viewClusterRoleNameExpected := "karmada-view" + if clusterRole.Name != viewClusterRoleNameExpected { + t.Fatalf("expected view cluster role name to be %s, but found %s", viewClusterRoleNameExpected, clusterRole.Name) + } +} diff --git a/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go b/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go new file mode 100644 index 000000000000..bfaafbd11cc3 --- /dev/null +++ b/operator/pkg/karmadaresource/webhookconfiguration/webhookconfiguration_test.go @@ -0,0 +1,130 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package webhookconfiguration + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + "github.com/karmada-io/karmada/operator/pkg/util" +) + +func TestEnsureWebhookConfiguration(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeclientset.NewSimpleClientset() + err := EnsureWebhookConfiguration(fakeClient, namespace, name, caBundle) + if err != nil { + t.Fatalf("failed to create karmada mutating webhook configuration and validation webhook configuration: %v", err) + } + + // Ensure the expected action (mutating webhook configuration and validating webhook configuration creation) occurred. + actions := fakeClient.Actions() + if len(actions) != 2 { + t.Fatalf("expected 2 actions, but got %d", len(actions)) + } +} + +func TestMutatingWebhookConfiguration(t *testing.T) { + name, namespace := "karmada-demo", "test" + + // Base64 encoding of the dummy certificate data. + caTestData := "test-ca-data" + caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData)) + + fakeClient := fakeclientset.NewSimpleClientset() + err := mutatingWebhookConfiguration(fakeClient, namespace, name, caBundle) + if err != nil { + t.Fatalf("error creating the mutating webhook configuration: %v", err) + } + + // Ensure the expected action (mutating webhook configuration creation) occurred on the fake clientset. 
+	actions := fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (MutatingWebhookConfiguration).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	// Validate the created MutatingWebhookConfiguration object.
+	mutatingWebhookConfig := createAction.GetObject().(*admissionregistrationv1.MutatingWebhookConfiguration)
+	serviceName := util.KarmadaWebhookName(name)
+	for _, webhook := range mutatingWebhookConfig.Webhooks {
+		clientConfigRootURL := fmt.Sprintf("https://%s.%s.svc:443", serviceName, namespace)
+		if !strings.HasPrefix(*webhook.ClientConfig.URL, clientConfigRootURL) {
+			t.Errorf("expected webhook client config url '%s' to start with '%s'", *webhook.ClientConfig.URL, clientConfigRootURL)
+		}
+
+		if string(webhook.ClientConfig.CABundle) != caTestData {
+			t.Fatalf("expected webhook client config caBundle %s, but got %s", caTestData, string(webhook.ClientConfig.CABundle))
+		}
+	}
+}
+
+func TestValidatingWebhookConfiguration(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+
+	// Base64 encoding of the dummy certificate data.
+	caTestData := "test-ca-data"
+	caBundle := base64.StdEncoding.EncodeToString([]byte(caTestData))
+
+	fakeClient := fakeclientset.NewSimpleClientset()
+	err := validatingWebhookConfiguration(fakeClient, namespace, name, caBundle)
+	if err != nil {
+		t.Fatalf("error creating the validating webhook configuration: %v", err)
+	}
+
+	// Ensure the expected action (validating webhook configuration creation) occurred on the fake clientset.
+	actions := fakeClient.Actions()
+	if len(actions) != 1 {
+		t.Fatalf("expected 1 action, but got %d", len(actions))
+	}
+
+	// Validate the action is a CreateAction and it's for the correct resource (ValidatingWebhookConfiguration).
+	createAction, ok := actions[0].(coretesting.CreateAction)
+	if !ok {
+		t.Fatalf("expected CreateAction, but got %T", actions[0])
+	}
+
+	// Validate the created ValidatingWebhookConfiguration object.
+	validatingWebhookConfig := createAction.GetObject().(*admissionregistrationv1.ValidatingWebhookConfiguration)
+	serviceName := util.KarmadaWebhookName(name)
+	for _, webhook := range validatingWebhookConfig.Webhooks {
+		clientConfigRootURL := fmt.Sprintf("https://%s.%s.svc:443", serviceName, namespace)
+		if !strings.HasPrefix(*webhook.ClientConfig.URL, clientConfigRootURL) {
+			t.Errorf("expected webhook client config url '%s' to start with '%s'", *webhook.ClientConfig.URL, clientConfigRootURL)
+		}
+
+		if string(webhook.ClientConfig.CABundle) != caTestData {
+			t.Fatalf("expected webhook client config caBundle %s, but got %s", caTestData, string(webhook.ClientConfig.CABundle))
+		}
+	}
+}
diff --git a/operator/pkg/scheme/scheme_test.go b/operator/pkg/scheme/scheme_test.go
new file mode 100644
index 000000000000..c1982babb72e
--- /dev/null
+++ b/operator/pkg/scheme/scheme_test.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" +) + +func TestSchemeInitialization(t *testing.T) { + // Ensure that the Kubernetes core scheme (for example, Pod) is added. + coreGVK := schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"} + if !isGVKRegistered(Scheme, coreGVK) { + t.Errorf("K8s core scheme should be registered for GVK: %v", coreGVK) + } + + // Ensure that the Karmada operator v1alpha1 scheme (for example, Karmada) is added. + karmadaGVK := schema.GroupVersionKind{ + Group: operatorv1alpha1.GroupVersion.Group, + Version: operatorv1alpha1.GroupVersion.Version, + Kind: "Karmada", + } + if !isGVKRegistered(Scheme, karmadaGVK) { + t.Errorf("Karmada v1alpha1 scheme should be registered for GVK: %v", karmadaGVK) + } +} + +// isGVKRegistered verifies if the scheme contains a specific GVK. +func isGVKRegistered(s *runtime.Scheme, gvk schema.GroupVersionKind) bool { + _, err := s.New(gvk) + if err != nil { + fmt.Printf("Failed to find GVK: %v, Error: %v\n", gvk, err) + } + return err == nil +} diff --git a/operator/pkg/tasks/deinit/cert.go b/operator/pkg/tasks/deinit/cert.go index 1ddea6af1edc..a791689ceabc 100644 --- a/operator/pkg/tasks/deinit/cert.go +++ b/operator/pkg/tasks/deinit/cert.go @@ -22,6 +22,7 @@ import ( "k8s.io/klog/v2" + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" "github.com/karmada-io/karmada/operator/pkg/constants" "github.com/karmada-io/karmada/operator/pkg/util" "github.com/karmada-io/karmada/operator/pkg/util/apiclient" @@ -29,16 +30,21 @@ import ( ) // NewCleanupCertTask init a task to cleanup certs -func NewCleanupCertTask() workflow.Task { +func NewCleanupCertTask(karmada *v1alpha1.Karmada) workflow.Task { + workflowTasks := []workflow.Task{ + newCleanupCertSubTask("karmada", util.KarmadaCertSecretName), + newCleanupCertSubTask("webhook", util.WebhookCertSecretName), + } + // Required only if local etcd is configured + if karmada.Spec.Components.Etcd.Local != nil { + cleanupEtcdCertTask := newCleanupCertSubTask("etcd", util.EtcdCertSecretName) + workflowTasks = append(workflowTasks, cleanupEtcdCertTask) + } return workflow.Task{ Name: "cleanup-cert", Run: runCleanupCert, RunSubTasks: true, - Tasks: []workflow.Task{ - newCleanupCertSubTask("karmada", util.KarmadaCertSecretName), - newCleanupCertSubTask("etcd", util.EtcdCertSecretName), - newCleanupCertSubTask("webhook", util.WebhookCertSecretName), - }, + Tasks: workflowTasks, } } diff --git a/operator/pkg/tasks/deinit/cert_test.go b/operator/pkg/tasks/deinit/cert_test.go new file mode 100644 index 000000000000..a183ad8c9f79 --- /dev/null +++ b/operator/pkg/tasks/deinit/cert_test.go @@ -0,0 +1,201 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewCleanupCertTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "NewCleanupCertTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "cleanup-cert", + Run: runCleanupCert, + RunSubTasks: true, + Tasks: []workflow.Task{ + newCleanupCertSubTask("karmada", util.KarmadaCertSecretName), + newCleanupCertSubTask("webhook", util.WebhookCertSecretName), + newCleanupCertSubTask("etcd", util.EtcdCertSecretName), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + karmada := &v1alpha1.Karmada{ + Spec: v1alpha1.KarmadaSpec{ + Components: &v1alpha1.KarmadaComponents{ + Etcd: &v1alpha1.Etcd{ + Local: &v1alpha1.LocalEtcd{}, + }, + }, + }, + } + cleanupCertTask := NewCleanupCertTask(karmada) + if err := util.DeepEqualTasks(cleanupCertTask, test.wantTask); err != nil { + t.Errorf("unexpected error: %v", err) + } + }) + } +} + +func TestRunCleanupCert(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunCleanupCert_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + wantErr: true, + errMsg: "cleanup-cert task invoked with an invalid data struct", + }, + { + name: "RunCleanupCert_ValidTypeAssertion_TypeAssertionIsValid", + runData: &TestDeInitData{ + name: "karmada-demo", + namespace: "test", + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runCleanupCert(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunCleanupCertSubTask(t *testing.T) { + tests := []struct { + name string + owner string + secretNameFunc util.Namefunc + runData workflow.RunData + prep func(workflow.RunData) error + verify func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunCleanupCertSubTask_InvalidTypeAssertion_TypeAssertionIsInvalid", + owner: "karmada", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: fmt.Sprintf("cleanup-%s-cert task invoked with an invalid data struct", "karmada"), + }, + { + name: "RunCleanupCertSubTask_WithKarmadaCertSecret_CertsHaveBeenCleanedUp", + owner: "karmada", + secretNameFunc: util.KarmadaCertSecretName, + prep: prepareKarmadaCertSecret, + verify: verifyKarmadaCertSecretDeleted, + runData: &TestDeInitData{ + name: "karmada-demo", + namespace: "test", + remoteClient: fakeclientset.NewSimpleClientset(), + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep the run 
cleanup cert subtask: %v", err) + } + cleanupCertSubTask := runCleanupCertSubTask(test.owner, test.secretNameFunc) + err := cleanupCertSubTask(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.runData); err != nil { + t.Errorf("failed to verify run cleanup cert subtask: %v", err) + } + }) + } +} + +func prepareKarmadaCertSecret(rd workflow.RunData) error { + data := rd.(*TestDeInitData) + secretName := util.KarmadaCertSecretName(data.name) + _, err := data.remoteClient.CoreV1().Secrets(data.namespace).Create(context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: data.namespace, + Labels: constants.KarmadaOperatorLabel, + }, + Type: corev1.SecretTypeOpaque, + }, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create secret: %v", err) + } + return nil +} + +func verifyKarmadaCertSecretDeleted(rd workflow.RunData) error { + data := rd.(*TestDeInitData) + secretName := util.KarmadaCertSecretName(data.name) + secret, err := data.remoteClient.CoreV1().Secrets(data.namespace).Get(context.TODO(), secretName, metav1.GetOptions{}) + if err == nil { + if val, exists := secret.Labels[constants.KarmadaOperatorLabelKeyName]; exists && val != "" { + return fmt.Errorf("expected secret %s to be deleted, but it still exists with label %s=%s", secretName, constants.KarmadaOperatorLabelKeyName, val) + } + return fmt.Errorf("expected secret %s to be deleted, but it still exists", secretName) + } + if apierrors.IsNotFound(err) { + return nil + } + return err +} diff --git a/operator/pkg/tasks/deinit/component.go b/operator/pkg/tasks/deinit/component.go index 776a3b8ef433..3c845e130b2e 100644 --- a/operator/pkg/tasks/deinit/component.go +++ b/operator/pkg/tasks/deinit/component.go @@ -22,6 +22,7 @@ import ( "k8s.io/klog/v2" + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" "github.com/karmada-io/karmada/operator/pkg/constants" "github.com/karmada-io/karmada/operator/pkg/util" "github.com/karmada-io/karmada/operator/pkg/util/apiclient" @@ -29,26 +30,31 @@ import ( ) // NewRemoveComponentTask init a remove karmada components task -func NewRemoveComponentTask() workflow.Task { +func NewRemoveComponentTask(karmada *v1alpha1.Karmada) workflow.Task { + workflowTasks := []workflow.Task{ + newRemoveComponentWithServiceSubTask(constants.KarmadaMetricsAdapterComponent, util.KarmadaMetricsAdapterName), + newRemoveComponentSubTask(constants.KarmadaDeschedulerComponent, util.KarmadaDeschedulerName), + newRemoveComponentSubTask(constants.KarmadaSchedulerComponent, util.KarmadaSchedulerName), + newRemoveComponentSubTask(constants.KarmadaControllerManagerComponent, util.KarmadaControllerManagerName), + newRemoveComponentSubTask(constants.KubeControllerManagerComponent, util.KubeControllerManagerName), + newRemoveComponentWithServiceSubTask(constants.KarmadaWebhookComponent, util.KarmadaWebhookName), + newRemoveComponentWithServiceSubTask(constants.KarmadaSearchComponent, util.KarmadaSearchName), + newRemoveComponentWithServiceSubTask(constants.KarmadaAggregatedAPIServerComponent, util.KarmadaAggregatedAPIServerName), + newRemoveComponentWithServiceSubTask(constants.KarmadaAPIserverComponent, util.KarmadaAPIServerName), + } + // 
Required only if local etcd is configured + if karmada.Spec.Components.Etcd.Local != nil { + removeEtcdTask := workflow.Task{ + Name: "remove-etcd", + Run: runRemoveEtcd, + } + workflowTasks = append(workflowTasks, removeEtcdTask) + } return workflow.Task{ Name: "remove-component", Run: runRemoveComponent, RunSubTasks: true, - Tasks: []workflow.Task{ - newRemoveComponentWithServiceSubTask(constants.KarmadaMetricsAdapterComponent, util.KarmadaMetricsAdapterName), - newRemoveComponentSubTask(constants.KarmadaDeschedulerComponent, util.KarmadaDeschedulerName), - newRemoveComponentSubTask(constants.KarmadaSchedulerComponent, util.KarmadaSchedulerName), - newRemoveComponentSubTask(constants.KarmadaControllerManagerComponent, util.KarmadaControllerManagerName), - newRemoveComponentSubTask(constants.KubeControllerManagerComponent, util.KubeControllerManagerName), - newRemoveComponentWithServiceSubTask(constants.KarmadaWebhookComponent, util.KarmadaWebhookName), - newRemoveComponentWithServiceSubTask(constants.KarmadaSearchComponent, util.KarmadaSearchName), - newRemoveComponentWithServiceSubTask(constants.KarmadaAggregatedAPIServerComponent, util.KarmadaAggregatedAPIServerName), - newRemoveComponentWithServiceSubTask(constants.KarmadaAPIserverComponent, util.KarmadaAPIServerName), - { - Name: "remove-etcd", - Run: runRemoveEtcd, - }, - }, + Tasks: workflowTasks, } } @@ -83,10 +89,10 @@ func runRemoveComponentSubTask(component string, workloadNameFunc util.Namefunc, return fmt.Errorf("remove-%s task invoked with an invalid data struct", component) } - // Although we found the workload by name, we cannot be sure that the - // workload was created by the karmada operator. if the workload exists the - // label "app.kubernetes.io/managed-by": "karmada-operator", we think it - // must be created by karmada operator. + // Even though we found the workload by name, we can't be certain that it was + // created by the Karmada operator. If the workload has the label + // "app.kubernetes.io/managed-by": "karmada-operator", we can assume it was + // created by the Karmada operator. err := apiclient.DeleteDeploymentIfHasLabels( data.RemoteClient(), workloadNameFunc(data.GetName()), diff --git a/operator/pkg/tasks/deinit/component_test.go b/operator/pkg/tasks/deinit/component_test.go new file mode 100644 index 000000000000..bc2985f4a5c2 --- /dev/null +++ b/operator/pkg/tasks/deinit/component_test.go @@ -0,0 +1,261 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package tasks
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+
+	"github.com/karmada-io/karmada/operator/pkg/constants"
+	"github.com/karmada-io/karmada/operator/pkg/util"
+	"github.com/karmada-io/karmada/operator/pkg/util/apiclient"
+	"github.com/karmada-io/karmada/operator/pkg/workflow"
+	"github.com/karmada-io/karmada/test/helper"
+)
+
+func TestRunRemoveComponent(t *testing.T) {
+	tests := []struct {
+		name    string
+		runData workflow.RunData
+		wantErr bool
+		errMsg  string
+	}{
+		{
+			name:    "RunRemoveComponent_InvalidTypeAssertion_TypeAssertionIsInvalid",
+			runData: &MyTestData{Data: "test"},
+			wantErr: true,
+			errMsg:  "remove-component task invoked with an invalid data struct",
+		},
+		{
+			name: "RunRemoveComponent_ValidTypeAssertion_TypeAssertionIsValid",
+			runData: &TestDeInitData{
+				name:      "karmada-demo",
+				namespace: "test",
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			err := runRemoveComponent(test.runData)
+			if err == nil && test.wantErr {
+				t.Error("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+		})
+	}
+}
+
+func TestRunRemoveComponentSubTask(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+	tests := []struct {
+		name             string
+		component        string
+		runData          workflow.RunData
+		workloadNameFunc util.Namefunc
+		deployment       *appsv1.Deployment
+		service          *corev1.Service
+		prep             func(workflow.RunData, *appsv1.Deployment, *corev1.Service) error
+		verify           func(workflow.RunData, *appsv1.Deployment, *corev1.Service) error
+		hasService       bool
+		wantErr          bool
+		errMsg           string
+	}{
+		{
+			name:      "RunRemoveComponentSubTask_InvalidTypeAssertion_TypeAssertionIsInvalid",
+			component: constants.KarmadaControllerManager,
+			runData:   &MyTestData{Data: "test"},
+			prep:      func(workflow.RunData, *appsv1.Deployment, *corev1.Service) error { return nil },
+			verify:    func(workflow.RunData, *appsv1.Deployment, *corev1.Service) error { return nil },
+			wantErr:   true,
+			errMsg:    fmt.Sprintf("remove-%s task invoked with an invalid data struct", constants.KarmadaControllerManager),
+		},
+		{
+			name:             "RunRemoveComponentSubTask_DeleteKarmadaControllerManagerDeploymentWithService_DeploymentAndServiceDeleted",
+			component:        constants.KarmadaControllerManager,
+			workloadNameFunc: util.KarmadaControllerManagerName,
+			deployment:       helper.NewDeployment(namespace, util.KarmadaControllerManagerName(name)),
+			service:          helper.NewService(namespace, util.KarmadaControllerManagerName(name), corev1.ServiceTypeClusterIP),
+			prep: func(rd workflow.RunData, d *appsv1.Deployment, s *corev1.Service) error {
+				data := rd.(*TestDeInitData)
+				client := data.RemoteClient()
+
+				// Create Karmada Controller Manager deployment with given labels.
+				d.Labels = constants.KarmadaOperatorLabel
+				if err := apiclient.CreateOrUpdateDeployment(client, d); err != nil {
+					return fmt.Errorf("failed to create deployment, got: %v", err)
+				}
+
+				// Create Karmada Controller Manager service with given labels.
+				s.Labels = constants.KarmadaOperatorLabel
+				if err := apiclient.CreateOrUpdateService(client, s); err != nil {
+					return fmt.Errorf("failed to create service, got: %v", err)
+				}
+
+				return nil
+			},
+			verify: func(rd workflow.RunData, d *appsv1.Deployment, s *corev1.Service) error {
+				data := rd.(*TestDeInitData)
+				client := data.RemoteClient()
+
+				// Verify that the Karmada Controller Manager deployment is deleted.
+				_, err := client.AppsV1().Deployments(d.GetNamespace()).Get(context.TODO(), d.GetName(), metav1.GetOptions{})
+				if err == nil {
+					return fmt.Errorf("expected deployment %s to be deleted, but it still exists", d.GetName())
+				}
+
+				// Verify that the Karmada Controller Manager service is deleted.
+				_, err = client.CoreV1().Services(s.GetNamespace()).Get(context.TODO(), s.GetName(), metav1.GetOptions{})
+				if err == nil {
+					return fmt.Errorf("expected service %s to be deleted, but it still exists", s.GetName())
+				}
+
+				return nil
+			},
+			runData: &TestDeInitData{
+				name:         name,
+				namespace:    namespace,
+				remoteClient: fakeclientset.NewSimpleClientset(),
+			},
+			hasService: true,
+			wantErr:    false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.runData, test.deployment, test.service); err != nil {
+				t.Errorf("failed to prep before removing component subtask %s, got: %v", test.component, err)
+			}
+			runRemoveComponentSubTask := runRemoveComponentSubTask(test.component, test.workloadNameFunc, true)
+			err := runRemoveComponentSubTask(test.runData)
+			if err == nil && test.wantErr {
+				t.Error("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+			if err := test.verify(test.runData, test.deployment, test.service); err != nil {
+				t.Errorf("failed to verify the deletion of deployments and services for %s component, got: %v", test.component, err)
+			}
+		})
+	}
+}
+
+func TestRunRemoveEtcd(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+	tests := []struct {
+		name        string
+		runData     workflow.RunData
+		statefulset *appsv1.StatefulSet
+		service     *corev1.Service
+		prep        func(workflow.RunData, *appsv1.StatefulSet, *corev1.Service) error
+		verify      func(workflow.RunData, *appsv1.StatefulSet, *corev1.Service) error
+		wantErr     bool
+		errMsg      string
+	}{
+		{
+			name:    "RunRemoveEtcd_InvalidTypeAssertion_TypeAssertionIsInvalid",
+			runData: &MyTestData{Data: "test"},
+			prep:    func(workflow.RunData, *appsv1.StatefulSet, *corev1.Service) error { return nil },
+			verify:  func(workflow.RunData, *appsv1.StatefulSet, *corev1.Service) error { return nil },
+			wantErr: true,
+			errMsg:  "remove-etcd task invoked with an invalid data struct",
+		},
+		{
+			name:        "RunRemoveEtcd_DeleteEtcdStatefulSetWithService_StatefulSetAndServiceDeleted",
+			statefulset: helper.NewStatefulSet(namespace, util.KarmadaEtcdName(name)),
+			service:     helper.NewService(namespace, util.KarmadaEtcdName(name), corev1.ServiceTypeClusterIP),
+			prep: func(rd workflow.RunData, statefulset *appsv1.StatefulSet, service *corev1.Service) error {
+				data := rd.(*TestDeInitData)
+				client := data.RemoteClient()
+
+				// Create Etcd statefulset with given labels.
+				statefulset.Labels = constants.KarmadaOperatorLabel
+				if err := apiclient.CreateOrUpdateStatefulSet(client, statefulset); err != nil {
+					return fmt.Errorf("failed to create statefulset, got: %v", err)
+				}
+
+				// Create Etcd service with given labels.
+				service.Labels = constants.KarmadaOperatorLabel
+				if err := apiclient.CreateOrUpdateService(client, service); err != nil {
+					return fmt.Errorf("failed to create service, got: %v", err)
+				}
+
+				return nil
+			},
+			verify: func(rd workflow.RunData, statefulset *appsv1.StatefulSet, service *corev1.Service) error {
+				data := rd.(*TestDeInitData)
+				client := data.RemoteClient()
+
+				// Verify that the Etcd statefulset is deleted.
+				_, err := client.AppsV1().StatefulSets(statefulset.GetNamespace()).Get(context.TODO(), statefulset.GetName(), metav1.GetOptions{})
+				if err == nil {
+					return fmt.Errorf("expected statefulset %s to be deleted, but it still exists", statefulset.GetName())
+				}
+
+				// Verify that the Etcd service is deleted.
+				_, err = client.CoreV1().Services(service.GetNamespace()).Get(context.TODO(), service.GetName(), metav1.GetOptions{})
+				if err == nil {
+					return fmt.Errorf("expected service %s to be deleted, but it still exists", service.GetName())
+				}
+
+				return nil
+			},
+			runData: &TestDeInitData{
+				name:         name,
+				namespace:    namespace,
+				remoteClient: fakeclientset.NewSimpleClientset(),
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.runData, test.statefulset, test.service); err != nil {
+				t.Errorf("failed to prep before removing etcd, got: %v", err)
+			}
+			err := runRemoveEtcd(test.runData)
+			if err == nil && test.wantErr {
+				t.Error("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+			if err := test.verify(test.runData, test.statefulset, test.service); err != nil {
+				t.Errorf("failed to verify the deletion of statefulsets and services for etcd component, got: %v", err)
+			}
+		})
+	}
+}
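One behavioral note on the conditional assembly above: with external etcd (spec.components.etcd.local left nil), NewRemoveComponentTask and NewCleanupCertTask skip their etcd sub-tasks entirely. A minimal sketch of that case, assuming only types defined by this patch:

// Hedged sketch: external etcd means no "remove-etcd" sub-task is appended.
karmada := &v1alpha1.Karmada{
	Spec: v1alpha1.KarmadaSpec{
		Components: &v1alpha1.KarmadaComponents{
			Etcd: &v1alpha1.Etcd{}, // Local is nil, i.e. etcd is hosted outside the operator
		},
	},
}
task := NewRemoveComponentTask(karmada)
for _, sub := range task.Tasks {
	fmt.Println(sub.Name) // only the nine component sub-tasks are listed
}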
diff --git a/operator/pkg/tasks/deinit/kubeconfig_test.go b/operator/pkg/tasks/deinit/kubeconfig_test.go
new file mode 100644
index 000000000000..96f54ad870c7
--- /dev/null
+++ b/operator/pkg/tasks/deinit/kubeconfig_test.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tasks
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"testing"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+
+	"github.com/karmada-io/karmada/operator/pkg/constants"
+	"github.com/karmada-io/karmada/operator/pkg/util"
+	"github.com/karmada-io/karmada/operator/pkg/util/apiclient"
+	"github.com/karmada-io/karmada/operator/pkg/workflow"
+	"github.com/karmada-io/karmada/test/helper"
+)
+
+func TestNewCleanupKubeconfigTask(t *testing.T) {
+	tests := []struct {
+		name     string
+		wantTask *workflow.Task
+	}{
+		{
+			name: "NewCleanupKubeconfigTask",
+			wantTask: &workflow.Task{
+				Name: "cleanup-kubeconfig",
+				Run:  runCleanupKubeconfig,
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			cleanupKubeconfigTask := NewCleanupKubeconfigTask()
+			if err := util.DeepEqualTasks(cleanupKubeconfigTask, *test.wantTask); err != nil {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestRunCleanupKubeconfig(t *testing.T) {
+	name, namespace := "karmada-demo", "test"
+	tests := []struct {
+		name    string
+		runData workflow.RunData
+		secret  *corev1.Secret
+		prep    func(workflow.RunData, *corev1.Secret) error
+		verify  func(rd workflow.RunData, s *corev1.Secret) error
+		wantErr bool
+		errMsg  string
+	}{
+		{
+			name:    "RunCleanupKubeconfig_InvalidTypeAssertion_TypeAssertionIsInvalid",
+			runData: &MyTestData{Data: "test"},
+			prep:    func(workflow.RunData, *corev1.Secret) error { return nil },
+			verify:  func(workflow.RunData, *corev1.Secret) error { return nil },
+			wantErr: true,
+			errMsg:  "cleanup-kubeconfig task invoked with an invalid data struct",
+		},
+		{
+			name: "RunCleanupKubeconfig_DeleteSecretWithKarmadaOperatorLabel_SecretDeleted",
+			runData: &TestDeInitData{
+				name:         name,
+				namespace:    namespace,
+				remoteClient: fakeclientset.NewSimpleClientset(),
+			},
+			secret: helper.NewSecret(namespace, util.AdminKubeconfigSecretName(name), map[string][]byte{}),
+			prep: func(rd workflow.RunData, s *corev1.Secret) error {
+				data := rd.(*TestDeInitData)
+				s.Labels = constants.KarmadaOperatorLabel
+				if err := apiclient.CreateOrUpdateSecret(data.RemoteClient(), s); err != nil {
+					return fmt.Errorf("failed to create secret, got err: %v", err)
+				}
+				return nil
+			},
+			verify: func(rd workflow.RunData, s *corev1.Secret) error {
+				data := rd.(*TestDeInitData)
+				_, err := data.RemoteClient().CoreV1().Secrets(s.GetNamespace()).Get(context.TODO(), s.GetName(), metav1.GetOptions{})
+				if err == nil {
+					return fmt.Errorf("expected secret %s to be deleted, but it still exists", s.GetName())
+				}
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.runData, test.secret); err != nil {
+				t.Errorf("failed to prep the test env before cleaning the kubeconfig, got error: %v", err)
+			}
+			err := runCleanupKubeconfig(test.runData)
+			if err == nil && test.wantErr {
+				t.Error("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+			if err := test.verify(test.runData, test.secret); err != nil {
+				t.Errorf("failed to verify the deletion of secret, got error: %v", err)
+			}
+		})
+	}
+}
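The deinit tests in this patch share a prep/verify closure pattern: prep seeds the fake clientset with whatever the task should delete, the task runs, and verify asserts the resulting state. A minimal sketch of the shape, with illustrative names that are not themselves part of the patch:

for _, test := range tests {
	t.Run(test.name, func(t *testing.T) {
		// Seed the fake clientset with the objects the task is expected to remove.
		if err := test.prep(test.runData); err != nil {
			t.Fatalf("prep failed: %v", err)
		}
		// Run the task under test here, e.g. runCleanupKubeconfig(test.runData).
		// Then assert the post-state, e.g. that the seeded object is gone.
		if err := test.verify(test.runData); err != nil {
			t.Errorf("verify failed: %v", err)
		}
	})
}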
diff --git a/operator/pkg/tasks/deinit/test_helpers.go b/operator/pkg/tasks/deinit/test_helpers.go
new file mode 100644
index 000000000000..1fe8dd754660
--- /dev/null
+++ b/operator/pkg/tasks/deinit/test_helpers.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tasks
+
+import (
+	clientset "k8s.io/client-go/kubernetes"
+)
+
+// TestInterface defines the interface for retrieving test data.
type TestInterface interface {
+	// Get returns the data from the test instance.
+	Get() string
+}
+
+// MyTestData is a struct that implements the TestInterface.
+type MyTestData struct {
+	Data string
+}
+
+// Get returns the data stored in the MyTestData struct.
+func (m *MyTestData) Get() string {
+	return m.Data
+}
+
+// TestDeInitData contains the configuration and state required to deinitialize Karmada components.
+type TestDeInitData struct {
+	name         string
+	namespace    string
+	remoteClient clientset.Interface
+}
+
+// Ensure TestDeInitData implements the DeInitData interface at compile time.
+var _ DeInitData = &TestDeInitData{}
+
+// GetName returns the name of the current Karmada installation.
+func (t *TestDeInitData) GetName() string {
+	return t.name
+}
+
+// GetNamespace returns the namespace of the current Karmada installation.
+func (t *TestDeInitData) GetNamespace() string {
+	return t.namespace
+}
+
+// RemoteClient returns the Kubernetes client for remote interactions.
+func (t *TestDeInitData) RemoteClient() clientset.Interface {
+	return t.remoteClient
+}
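test_helpers.go gives those test tables a DeInitData implementation backed by a fake clientset; a typical instantiation, mirroring the tests in this patch:

data := &TestDeInitData{
	name:         "karmada-demo",
	namespace:    "test",
	remoteClient: fakeclientset.NewSimpleClientset(),
}
// Sub-tasks consume it through the DeInitData interface.
_ = data.GetName()      // "karmada-demo"
_ = data.GetNamespace() // "test"
_ = data.RemoteClient() // the fake client the cleanup sub-tasks act on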
diff --git a/operator/pkg/tasks/init/apiserver_test.go b/operator/pkg/tasks/init/apiserver_test.go
new file mode 100644
index 000000000000..930bb04b2dd8
--- /dev/null
+++ b/operator/pkg/tasks/init/apiserver_test.go
@@ -0,0 +1,463 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tasks
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+	"time"
+
+	corev1 "k8s.io/api/core/v1"
+	fakeclientset "k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/rest"
+	"k8s.io/utils/ptr"
+
+	operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1"
+	"github.com/karmada-io/karmada/operator/pkg/constants"
+	"github.com/karmada-io/karmada/operator/pkg/util"
+	"github.com/karmada-io/karmada/operator/pkg/util/apiclient"
+	"github.com/karmada-io/karmada/operator/pkg/workflow"
+)
+
+func TestNewAPIServerTask(t *testing.T) {
+	tests := []struct {
+		name     string
+		wantTask workflow.Task
+	}{
+		{
+			name: "NewKarmadaApiserverTask_IsCalled_ExpectedWorkflowTask",
+			wantTask: workflow.Task{
+				Name:        "apiserver",
+				Run:         runApiserver,
+				RunSubTasks: true,
+				Tasks: []workflow.Task{
+					{
+						Name: constants.KarmadaAPIserverComponent,
+						Run:  runKarmadaAPIServer,
+					},
+					{
+						Name: fmt.Sprintf("%s-%s", "wait", constants.KarmadaAPIserverComponent),
+						Run:  runWaitKarmadaAPIServer,
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			karmadaAPIServerTask := NewKarmadaApiserverTask()
+			err := util.DeepEqualTasks(karmadaAPIServerTask, test.wantTask)
+			if err != nil {
+				t.Errorf("unexpected error, got %v", err)
+			}
+		})
+	}
+}
+
+func TestNewKarmadaAggregatedApiserverTask(t *testing.T) {
+	tests := []struct {
+		name     string
+		wantTask workflow.Task
+	}{
+		{
+			name: "NewKarmadaAggregatedApiserverTask_IsCalled_ExpectedWorkflowTask",
+			wantTask: workflow.Task{
+				Name:        "aggregated-apiserver",
+				Run:         runAggregatedApiserver,
+				RunSubTasks: true,
+				Tasks: []workflow.Task{
+					{
+						Name: constants.KarmadaAggregatedAPIServerComponent,
+						Run:  runKarmadaAggregatedAPIServer,
+					},
+					{
+						Name: fmt.Sprintf("%s-%s", "wait", constants.KarmadaAggregatedAPIServerComponent),
+						Run:  runWaitKarmadaAggregatedAPIServer,
+					},
+				},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			karmadaAggregatedAPIServerTask := NewKarmadaAggregatedApiserverTask()
+			err := util.DeepEqualTasks(karmadaAggregatedAPIServerTask, test.wantTask)
+			if err != nil {
+				t.Errorf("unexpected error, got %v", err)
+			}
+		})
+	}
+}
+
+func TestRunAggregatedAPIServer(t *testing.T) {
+	tests := []struct {
+		name    string
+		runData workflow.RunData
+		wantErr bool
+		errMsg  string
+	}{
+		{
+			name:    "RunAggregatedApiserver_InvalidTypeAssertion_TypeAssertionFailed",
+			runData: MyTestData{Data: "test"},
+			wantErr: true,
+			errMsg:  "aggregated-apiserver task invoked with an invalid data struct",
+		},
+		{
+			name:    "RunAggregatedApiserver_ValidTypeAssertion_TypeAssertionSucceeded",
+			runData: &TestInitData{},
+			wantErr: false,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			err := runAggregatedApiserver(test.runData)
+			if err == nil && test.wantErr {
+				t.Errorf("expected error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg)
+			}
+		})
+	}
+}
+
+func TestRunAPIServer(t *testing.T) {
+	tests := []struct {
+		name    string
+		runData workflow.RunData
+		wantErr bool
+		errMsg  string
+	}{
+		{
+			name:    "RunAPIServer_InvalidTypeAssertion_TypeAssertionFailed",
+			runData: MyTestData{Data: "test"},
+			wantErr: true,
+			errMsg:  "apiserver task invoked with an invalid data struct",
+		},
+		{
+			name:    "RunAPIServer_ValidTypeAssertion_TypeAssertionSucceeded",
+ runData: &TestInitData{}, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runApiserver(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunKarmadaAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunKarmadaAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + wantErr: true, + errMsg: "KarmadaApiserver task invoked with an invalid data struct", + }, + { + name: "RunKarmadaAPIServer_NilKarmadaAPIServer_RunIsCompletedWithoutErrors", + runData: &TestInitData{ + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + wantErr: false, + }, + { + name: "RunKarmadaAPIServer_InitializeKarmadaAPIServer_KarmadaAPIServerEnsured", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + KarmadaAPIServer: &operatorv1alpha1.KarmadaAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: "karmada-apiserver-image"}, + Replicas: ptr.To[int32](2), + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: corev1.PullIfNotPresent, + }, + ServiceSubnet: ptr.To("10.96.0.0/12"), + }, + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runKarmadaAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunWaitKarmadaAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitKarmadaAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "wait-KarmadaAPIServer task invoked with an invalid data struct", + }, + { + name: "RunWaitKarmadaAPIServer_TimeoutWaitingForSomeKarmadaAPIServersPods_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout = time.Second + return nil + }, + wantErr: true, + errMsg: "waiting for karmada-apiserver to ready timeout", + }, + { + name: "RunWaitKarmadaAPIServer_WaitingForSomeKarmadaAPIServersPods_KarmadaAPIServerIsReady", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: 
fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + _, err := apiclient.CreatePods(data.RemoteClient(), data.Namespace, util.KarmadaAPIServerName(data.Name), 2, karmadaApiserverLabels, true) + return err + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for Karmada APIServer: %v", err) + } + err := runWaitKarmadaAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunKarmadaAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func() error + wantErr bool + errMsg string + }{ + { + name: "RunKarmadaAggregatedAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func() error { return nil }, + wantErr: true, + errMsg: "KarmadaAggregatedAPIServer task invoked with an invalid data struct", + }, + { + name: "RunKarmadaAggregatedAPIServer_NilKarmadaAggregatedAPIServer_RunIsCompletedWithoutErrors", + runData: &TestInitData{ + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + }, + prep: func() error { return nil }, + wantErr: false, + }, + { + name: "RunKarmadaAggregatedAPIServer_InitializeKarmadaAggregatedAPIServer_KarmadaAggregatedAPIServerEnsured", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + ComponentsUnits: &operatorv1alpha1.KarmadaComponents{ + KarmadaAggregatedAPIServer: &operatorv1alpha1.KarmadaAggregatedAPIServer{ + CommonSettings: operatorv1alpha1.CommonSettings{ + Image: operatorv1alpha1.Image{ImageTag: "karmada-aggregated-apiserver-image"}, + Replicas: ptr.To[int32](2), + Resources: corev1.ResourceRequirements{}, + ImagePullPolicy: corev1.PullIfNotPresent, + }, + }, + Etcd: &operatorv1alpha1.Etcd{ + Local: &operatorv1alpha1.LocalEtcd{}, + }, + }, + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runKarmadaAggregatedAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunWaitKarmadaAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitKarmadaAggregatedAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { + return nil + }, + wantErr: true, + errMsg: "wait-KarmadaAggregatedAPIServer task invoked with an invalid data struct", + }, + { + name: 
"RunWaitKarmadaAggregatedAPIServer_TimeoutWaitingForSomeKarmadaAggregatedAPIServersPods_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout = time.Second + return nil + }, + wantErr: true, + errMsg: "waiting for karmada-aggregated-apiserver to ready timeout", + }, + { + name: "RunWaitKarmadaAggregatedAPIServer_WaitingForSomeKarmadaAggregatedAPIServersPods_KarmadaAggregatedAPIServerIsReady", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + FeatureGatesOptions: map[string]bool{ + "Feature1": true, + }, + }, + prep: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + _, err := apiclient.CreatePods(data.RemoteClient(), data.Namespace, util.KarmadaAggregatedAPIServerName(data.Name), 2, karmadaAggregatedAPIServerLabels, true) + return err + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for Karmada Aggregated APIServer: %v", err) + } + err := runWaitKarmadaAggregatedAPIServer(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} diff --git a/operator/pkg/tasks/init/cert.go b/operator/pkg/tasks/init/cert.go index 3530df0884e4..f48dfe1da72c 100644 --- a/operator/pkg/tasks/init/cert.go +++ b/operator/pkg/tasks/init/cert.go @@ -127,7 +127,7 @@ func runCertTask(cc, caCert *certs.CertConfig) func(d workflow.RunData) error { } if cc.CAName != caCert.Name { - return fmt.Errorf("expected CAname for %s, but was %s", cc.CAName, cc.Name) + return fmt.Errorf("mismatched CA name: expected %s but got %s", cc.CAName, caCert.Name) } if err := mutateCertConfig(data, cc); err != nil { diff --git a/operator/pkg/tasks/init/cert_test.go b/operator/pkg/tasks/init/cert_test.go new file mode 100644 index 000000000000..b66a01707c47 --- /dev/null +++ b/operator/pkg/tasks/init/cert_test.go @@ -0,0 +1,427 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "context" + "crypto/x509" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/certs" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewCertTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "TestNewCertTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "Certs", + Run: runCerts, + Skip: skipCerts, + RunSubTasks: true, + Tasks: newCertSubTasks(), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + certTask := NewCertTask() + err := util.DeepEqualTasks(certTask, test.wantTask) + if err != nil { + t.Errorf("unexpected error, got %v", err) + } + }) + } +} + +func TestRunCerts(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunCerts_InvalidTypeAssertion_TypeAssertionFailed", + runData: MyTestData{Data: "test"}, + wantErr: true, + errMsg: "certs task invoked with an invalid data struct", + }, + { + name: "RunCerts_ValidTypeAssertion_TypeAssertionSucceeded", + runData: &TestInitData{}, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runCerts(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestSkipCerts(t *testing.T) { + client := fakeclientset.NewSimpleClientset() + tests := []struct { + name string + runData workflow.RunData + prep func() error + cleanup func() error + wantErr bool + wantSkip bool + errMsg string + }{ + { + name: "SkipCerts_InvalidTypeAssertion_TypeAssertionFailed", + runData: MyTestData{Data: "test"}, + prep: func() error { return nil }, + cleanup: func() error { return nil }, + wantErr: true, + wantSkip: false, + errMsg: "certs task invoked with an invalid data struct", + }, + { + name: "SkipCerts_ValidTypeAssertion_TypeAssertionSucceeded", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: client, + }, + prep: func() error { return nil }, + cleanup: func() error { return nil }, + wantErr: false, + wantSkip: false, + }, + { + name: "SkipCerts_WithEmptySecretData_ErrorReturned", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: client, + }, + prep: func() error { + _, err := client.CoreV1().Secrets("test").Create( + context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.KarmadaCertSecretName("karmada-demo"), + Namespace: "test", + }, + Data: map[string][]byte{}, + }, + metav1.CreateOptions{}, + ) + return err + }, + cleanup: func() error { + err := client.CoreV1().Secrets("test").Delete( + context.TODO(), util.KarmadaCertSecretName("karmada-demo"), + metav1.DeleteOptions{}, + ) + if err != nil { + return fmt.Errorf("failed to delete %s secret", "test") + } + return nil + }, + wantErr: true, + wantSkip: false, + }, + { + name:
"SkipCerts_SecertCertDataExist_CertsSkipped", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: client, + }, + prep: func() error { + var err error + _, err = client.CoreV1().Secrets("test").Create( + context.TODO(), &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: util.KarmadaCertSecretName("karmada-demo"), + Namespace: "test", + }, + Data: map[string][]byte{ + "ca.crt": []byte("ca-crt-data"), + "ca.key": []byte("ca-key-data"), + }, + }, + metav1.CreateOptions{}, + ) + return err + }, + cleanup: func() error { + err := client.CoreV1().Secrets("test").Delete( + context.TODO(), util.KarmadaCertSecretName("karmada-demo"), + metav1.DeleteOptions{}, + ) + if err != nil { + return fmt.Errorf("failed to delete %s secret", "test") + } + return nil + }, + wantErr: false, + wantSkip: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.prep() + if err != nil { + t.Errorf("failed to prep test: %v", err) + } + + skip, err := skipCerts(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if skip != test.wantSkip { + t.Errorf("expected %t skip, but got %t", test.wantSkip, skip) + } + err = test.cleanup() + if err != nil { + t.Errorf("failed to clean up test: %v", err) + } + }) + } +} + +func TestRunCATask(t *testing.T) { + tests := []struct { + name string + kc *certs.CertConfig + runData workflow.RunData + prep func(*certs.CertConfig) error + verify func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunCATask_InvalidTypeAssertion_TypeAssertionFailed", + kc: certs.KarmadaCertRootCA(), + runData: MyTestData{Data: "test"}, + prep: func(*certs.CertConfig) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "certs task invoked with an invalid data struct", + }, + { + name: "RunCATask_WithNonCACertificate_CACertificateExpected", + kc: certs.KarmadaCertAdmin(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + }, + prep: func(*certs.CertConfig) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: fmt.Sprintf("this function should only be used for CAs, but cert %s has CA %s", constants.KarmadaCertAndKeyName, constants.CaCertAndKeyName), + }, + { + name: "RunCATask_WithEd25519UnsupportedPublicKeyAlgorithm_UnsupportedKeyType", + kc: certs.KarmadaCertRootCA(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + }, + prep: func(cc *certs.CertConfig) error { + cc.PublicKeyAlgorithm = x509.Ed25519 + return nil + }, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: fmt.Sprintf("unsupported key type: %T", x509.Ed25519), + }, + { + name: "RunCATask_GenerateCACertificate_CACertificateSuccessfullyGenerated", + kc: certs.KarmadaCertRootCA(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + }, + prep: func(*certs.CertConfig) error { return nil }, + verify: func(rd workflow.RunData) error { + certData := rd.(*TestInitData).CertList() + if len(certData) != 1 { + return fmt.Errorf("expected cert store to contain the generated CA certificate but found %d certs", len(certData)) + } + return nil + }, + wantErr: false, + }, 
+ } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.kc); err != nil { + t.Errorf("failed to prep cert config data: %v", err) + } + caTask := runCATask(test.kc) + err := caTask(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.runData); err != nil { + t.Errorf("failed to verify run data: %v", err) + } + }) + } +} + +func TestRunCertTask(t *testing.T) { + tests := []struct { + name string + kc *certs.CertConfig + caCert *certs.CertConfig + runData workflow.RunData + prep func(*certs.CertConfig, *certs.CertConfig, workflow.RunData) error + verify func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunCertTask_InvalidTypeAssertion_TypeAssertionFailed", + kc: certs.KarmadaCertAdmin(), + runData: MyTestData{Data: "test"}, + prep: func(*certs.CertConfig, *certs.CertConfig, workflow.RunData) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + caCert: nil, + wantErr: true, + errMsg: "certs task invoked with an invalid data struct", + }, + { + name: "RunCertTask_NilCACert_CACertIsNil", + kc: certs.KarmadaCertAdmin(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + }, + prep: func(*certs.CertConfig, *certs.CertConfig, workflow.RunData) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + caCert: nil, + wantErr: true, + errMsg: fmt.Sprintf("unexpected empty ca cert for %s", constants.KarmadaCertAndKeyName), + }, + { + name: "RunCertTask_MismatchCAName_CANameIsMismatch", + kc: certs.KarmadaCertAdmin(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + }, + prep: func(*certs.CertConfig, *certs.CertConfig, workflow.RunData) error { return nil }, + verify: func(workflow.RunData) error { return nil }, + caCert: &certs.CertConfig{ + Name: "invalid", + }, + wantErr: true, + errMsg: fmt.Sprintf("mismatched CA name: expected %s but got %s", constants.CaCertAndKeyName, "invalid"), + }, + { + name: "RunCertTask_CreateCertAndKeyFileWithCA_SuccessfullyGeneratedCertificate", + kc: certs.KarmadaCertAdmin(), + caCert: certs.KarmadaCertRootCA(), + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + ComponentsUnits: &v1alpha1.KarmadaComponents{ + KarmadaAPIServer: &v1alpha1.KarmadaAPIServer{}, + }, + }, + prep: func(ca *certs.CertConfig, _ *certs.CertConfig, rd workflow.RunData) error { + newCA, err := certs.NewCertificateAuthority(ca) + if err != nil { + return fmt.Errorf("failed to create certificate authority: %v", err) + } + rd.(*TestInitData).AddCert(newCA) + return nil + }, + verify: func(rd workflow.RunData) error { + certData := rd.(*TestInitData).CertList() + if len(certData) != 2 { + return fmt.Errorf("expected cert store to contain the Certificate Authority and the associated certificate but found %d certs", len(certData)) + } + if rd.(*TestInitData).GetCert(constants.CaCertAndKeyName) == nil { + return fmt.Errorf("expected %s Karmada Root CA to exist in the certificates store", constants.CaCertAndKeyName) + } + if rd.(*TestInitData).GetCert(constants.KarmadaCertAndKeyName) == nil { + return fmt.Errorf("expected %s karmada admin certificate to exist in the certificate store", 
constants.KarmadaCertAndKeyName) + } + return nil + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.caCert, test.kc, test.runData); err != nil { + t.Errorf("failed to prep cert config data: %v", err) + } + certTask := runCertTask(test.kc, test.caCert) + err := certTask(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.runData); err != nil { + t.Errorf("failed to verify run data: %v", err) + } + }) + } +} diff --git a/operator/pkg/tasks/init/component.go b/operator/pkg/tasks/init/component.go index 931090b3f1cf..7efe777ec415 100644 --- a/operator/pkg/tasks/init/component.go +++ b/operator/pkg/tasks/init/component.go @@ -264,6 +264,7 @@ func runKarmadaSearch(r workflow.RunData) error { err := search.EnsureKarmadaSearch( data.RemoteClient(), cfg.KarmadaSearch, + cfg.Etcd, data.GetName(), data.GetNamespace(), data.FeatureGates(), diff --git a/operator/pkg/tasks/init/crd_test.go b/operator/pkg/tasks/init/crd_test.go new file mode 100644 index 000000000000..4fc555110c23 --- /dev/null +++ b/operator/pkg/tasks/init/crd_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestRunPrepareCrds(t *testing.T) { + name, namespace := "karmada-demo", "test" + tests := []struct { + name string + runData workflow.RunData + wantErr bool + errMsg string + }{ + { + name: "RunPrepareCrds_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + wantErr: true, + errMsg: "prepare-crds task invoked with an invalid data struct", + }, + { + name: "RunPrepareCrds_ValidCrdsDirectory_CrdsDirectoryIsValid", + runData: &TestInitData{ + Name: name, + Namespace: namespace, + DataDirectory: constants.KarmadaDataDir, + CrdTarballArchive: operatorv1alpha1.CRDTarball{ + HTTPSource: &operatorv1alpha1.HTTPSource{ + URL: "https://www.example.com/crd-tarball", + }, + }, + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runPrepareCrds(test.runData) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + }) + } +} + +func TestRunSkipCrdsDownload(t *testing.T) { + name, namespace := "karmada-demo", "test" + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + cleanup func(workflow.RunData) error + wantSkip bool + wantErr bool + errMsg string + }{ + { + name: "RunSkipCrdsDownload_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{}, + prep: func(workflow.RunData) error { return nil }, + cleanup: func(workflow.RunData) error { return nil }, + wantSkip: false, + wantErr: true, + errMsg: "prepare-crds task invoked with an invalid data struct", + }, + { + name: "RunSkipCrdsDownload_WithCrdsTar_CrdsDownloadSkipped", + runData: &TestInitData{ + Name: name, + Namespace: namespace, + DataDirectory: filepath.Join(os.TempDir(), "crds-test"), + CrdTarballArchive: operatorv1alpha1.CRDTarball{ + HTTPSource: &operatorv1alpha1.HTTPSource{ + URL: "https://www.example.com/crd-tarball", + }, + }, + }, + prep: runSkipCrdsDownloadPrep, + cleanup: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + if err := os.RemoveAll(data.DataDirectory); err != nil { + return fmt.Errorf("failed to cleanup data directory %s, got error: %v", data.DataDirectory, err) + } + return nil + }, + wantSkip: true, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Fatalf("failed to prep before skipping downloading crds, got error: %v", err) + } + skipDownload, err := skipCrdsDownload(test.runData) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if skipDownload != test.wantSkip { + t.Errorf("expected crds download status to be %t, but got %t", test.wantSkip, skipDownload) + } + if err := test.cleanup(test.runData); err != nil { + 
t.Errorf("failed to cleanup test environment artifacts, got error: %v", err) + } + }) + } +} + +func TestRunCrdsDownload(t *testing.T) { + name, namespace := "karmada-demo", "test" + tests := []struct { + name string + runData workflow.RunData + cleanup func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunCrdsDownload_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + cleanup: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "download-crds task invoked with an invalid data struct", + }, + { + name: "RunCrdsDownload_DownloadingCRDs_CRDsDownloadedSuccessfully", + runData: &TestInitData{ + Name: name, + Namespace: namespace, + DataDirectory: filepath.Join(os.TempDir(), "crds-test"), + CrdTarballArchive: operatorv1alpha1.CRDTarball{ + HTTPSource: &operatorv1alpha1.HTTPSource{ + URL: "https://github.com/karmada-io/karmada/releases/download/v1.11.1/crds.tar.gz", + }, + }, + }, + cleanup: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + if err := os.RemoveAll(data.DataDirectory); err != nil { + return fmt.Errorf("failed to cleanup data directory %s, got error: %v", data.DataDirectory, err) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runCrdsDownload(test.runData) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if err := test.cleanup(test.runData); err != nil { + t.Errorf("failed to cleanup test environment artifacts, got error: %v", err) + } + }) + } +} + +func TestRunUnpack(t *testing.T) { + name, namespace := "karmada-demo", "test" + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + cleanup func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunUnpack_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + cleanup: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "unpack task invoked with an invalid data struct", + }, + { + name: "RunUnpack_CRDYamlFilesNotExist_CRDTarUnpacked", + runData: &TestInitData{ + Name: name, + Namespace: namespace, + DataDirectory: filepath.Join(os.TempDir(), "crds-test"), + CrdTarballArchive: operatorv1alpha1.CRDTarball{ + HTTPSource: &operatorv1alpha1.HTTPSource{ + URL: "https://github.com/karmada-io/karmada/releases/download/v1.11.1/crds.tar.gz", + }, + }, + }, + prep: func(rd workflow.RunData) error { + if err := runCrdsDownload(rd); err != nil { + return fmt.Errorf("failed to download crds, got error: %v", err) + } + return nil + }, + cleanup: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + if err := os.RemoveAll(data.DataDirectory); err != nil { + return fmt.Errorf("failed to cleanup data directory %s, got error: %v", data.DataDirectory, err) + } + return nil + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Fatalf("failed to prep before unpacking crds, got error: %v", err) + } + err := runUnpack(test.runData) + if err == nil && test.wantErr { + t.Error("expected an error, but got none") + } + if err != nil && 
!test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if err := test.cleanup(test.runData); err != nil { + t.Errorf("failed to cleanup test environment artifacts, got error: %v", err) + } + }) + } +} + +// runSkipCrdsDownloadPrep prepares the CRD directory and creates a dummy CRD tar file. +// It is used as part of a test workflow to simulate the presence of CRD data without +// actually downloading or validating real CRD content. +func runSkipCrdsDownloadPrep(rd workflow.RunData) error { + data := rd.(*TestInitData) + crdsDir, err := getCrdsDir(data) + if err != nil { + return fmt.Errorf("failed to get crds directory, got error: %v", err) + } + + // Create CRD test directories recursively. + if err := os.MkdirAll(crdsDir, 0700); err != nil { + return fmt.Errorf("failed to create crds directory recursively, got error: %v", err) + } + + // Create CRD test file. + crdsTarFile := filepath.Join(crdsDir, crdsFileSuffix) + file, err := os.Create(crdsTarFile) + if err != nil { + return fmt.Errorf("failed to create crds tar file %s, got error: %v", crdsTarFile, err) + } + defer file.Close() + + // Write dummy data to the tar file - not necessarily valid. + if _, err := file.WriteString("test data"); err != nil { + return fmt.Errorf("failed to write to tar file, got error: %v", err) + } + + return nil +} diff --git a/operator/pkg/tasks/init/rbac_test.go b/operator/pkg/tasks/init/rbac_test.go new file mode 100644 index 000000000000..82e89095845b --- /dev/null +++ b/operator/pkg/tasks/init/rbac_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package tasks + +import ( + "context" + "fmt" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewRBACTask(t *testing.T) { + tests := []struct { + name string + wantTask workflow.Task + }{ + { + name: "NewRBACTask_IsCalled_ExpectedWorkflowTask", + wantTask: workflow.Task{ + Name: "rbac", + Run: runRBAC, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rbacTask := NewRBACTask() + if err := util.DeepEqualTasks(rbacTask, test.wantTask); err != nil { + t.Errorf("unexpected err: %v", err) + } + }) + } +} + +func TestRunRBAC(t *testing.T) { + name, namespace := "karmada-demo", "test" + tests := []struct { + name string + runData workflow.RunData + verify func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunRBAC_InvalidTypeAssertion_TypeAssertionIsInvalid", + runData: &MyTestData{Data: "test"}, + verify: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "RBAC task invoked with an invalid data struct", + }, + { + name: "RunRBAC_InstallKarmadaRBAC_KarmadaRBACInstalled", + runData: &TestInitData{ + Name: name, + Namespace: namespace, + KarmadaClientConnector: fakeclientset.NewSimpleClientset(), + }, + verify: func(rd workflow.RunData) error { + _, ok := rd.(*TestInitData) + if !ok { + return fmt.Errorf("unexpected err, rundata interface doesn't implement TestInitData") + } + + client := rd.(*TestInitData).KarmadaClient() + actions := client.(*fakeclientset.Clientset).Actions() + if len(actions) != 4 { + return fmt.Errorf("expected 4 actions, but got %d", len(actions)) + } + + rolesToCheck := []string{"cluster-proxy-admin", "karmada-edit", "karmada-view"} + for _, role := range rolesToCheck { + _, err := client.RbacV1().ClusterRoles().Get(context.TODO(), role, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get ClusterRole: %s: %v", role, err) + } + } + + return nil + }, + wantErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := runRBAC(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected an error, got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.runData); err != nil { + t.Errorf("failed to verify the namespace running task, got err: %v", err) + } + }) + } +} diff --git a/operator/pkg/tasks/init/test_helpers.go b/operator/pkg/tasks/init/test_helpers.go new file mode 100644 index 000000000000..e480b5ff8584 --- /dev/null +++ b/operator/pkg/tasks/init/test_helpers.go @@ -0,0 +1,157 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tasks + +import ( + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/certs" +) + +// TestInterface defines the interface for retrieving test data. +type TestInterface interface { + // Get returns the data from the test instance. + Get() string +} + +// MyTestData is a struct that implements the TestInterface. +type MyTestData struct { + Data string +} + +// Get returns the data stored in the MyTestData struct. +func (m *MyTestData) Get() string { + return m.Data +} + +// TestInitData contains the configuration and state required to initialize Karmada components. +type TestInitData struct { + Name string + Namespace string + ControlplaneConfigREST *rest.Config + DataDirectory string + CrdTarballArchive operatorv1alpha1.CRDTarball + KarmadaVersionRelease string + ComponentsUnits *operatorv1alpha1.KarmadaComponents + FeatureGatesOptions map[string]bool + RemoteClientConnector clientset.Interface + KarmadaClientConnector clientset.Interface + ControlplaneAddr string + Certs []*certs.KarmadaCert +} + +// Ensure TestInitData implements InitData interface at compile time. +var _ InitData = &TestInitData{} + +// GetName returns the name of the current Karmada installation. +func (t *TestInitData) GetName() string { + return t.Name +} + +// GetNamespace returns the namespace of the current Karmada installation. +func (t *TestInitData) GetNamespace() string { + return t.Namespace +} + +// SetControlplaneConfig sets the control plane configuration for Karmada. +func (t *TestInitData) SetControlplaneConfig(config *rest.Config) { + t.ControlplaneConfigREST = config +} + +// ControlplaneConfig returns the control plane configuration. +func (t *TestInitData) ControlplaneConfig() *rest.Config { + return t.ControlplaneConfigREST +} + +// ControlplaneAddress returns the address of the control plane. +func (t *TestInitData) ControlplaneAddress() string { + return t.ControlplaneAddr +} + +// RemoteClient returns the Kubernetes client for remote interactions. +func (t *TestInitData) RemoteClient() clientset.Interface { + return t.RemoteClientConnector +} + +// KarmadaClient returns the Kubernetes client for interacting with Karmada. +func (t *TestInitData) KarmadaClient() clientset.Interface { + return t.KarmadaClientConnector +} + +// DataDir returns the data directory used by Karmada. +func (t *TestInitData) DataDir() string { + return t.DataDirectory +} + +// CrdTarball returns the CRD tarball used for Karmada installation. +func (t *TestInitData) CrdTarball() operatorv1alpha1.CRDTarball { + return t.CrdTarballArchive +} + +// KarmadaVersion returns the version of Karmada being used. +func (t *TestInitData) KarmadaVersion() string { + return t.KarmadaVersionRelease +} + +// Components returns the Karmada components used in the current installation. +func (t *TestInitData) Components() *operatorv1alpha1.KarmadaComponents { + return t.ComponentsUnits +} + +// FeatureGates returns the feature gates enabled for the current installation. +func (t *TestInitData) FeatureGates() map[string]bool { + return t.FeatureGatesOptions +} + +// AddCert adds a Karmada certificate to the TestInitData. +func (t *TestInitData) AddCert(cert *certs.KarmadaCert) { + t.Certs = append(t.Certs, cert) +} + +// GetCert retrieves a Karmada certificate by its name. 
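+// The lookup matches name against the portion of the stored certificate's file name before the first dot, so "ca" matches a certificate stored as "ca.crt".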
+func (t *TestInitData) GetCert(name string) *certs.KarmadaCert { + for _, cert := range t.Certs { + parts := strings.Split(cert.CertName(), ".") + if parts[0] == name { + return cert + } + } + return nil +} + +// CertList returns a list of all Karmada certificates stored in TestInitData. +func (t *TestInitData) CertList() []*certs.KarmadaCert { + return t.Certs +} + +// LoadCertFromSecret loads a Karmada certificate from a Kubernetes secret. +func (t *TestInitData) LoadCertFromSecret(secret *corev1.Secret) error { + if len(secret.Data) == 0 { + return fmt.Errorf("cert data is empty") + } + + // Dummy implementation: load empty certificate. + cert := &certs.KarmadaCert{} + t.AddCert(cert) + return nil +} diff --git a/operator/pkg/tasks/init/upload.go b/operator/pkg/tasks/init/upload.go index 05dfd5e20036..6343371abbf3 100644 --- a/operator/pkg/tasks/init/upload.go +++ b/operator/pkg/tasks/init/upload.go @@ -70,7 +70,6 @@ func runUploadAdminKubeconfig(r workflow.RunData) error { case corev1.ServiceTypeClusterIP: apiserverName := util.KarmadaAPIServerName(data.GetName()) endpoint = fmt.Sprintf("https://%s.%s.svc.cluster.local:%d", apiserverName, data.GetNamespace(), constants.KarmadaAPIserverListenClientPort) - case corev1.ServiceTypeNodePort: service, err := apiclient.GetService(data.RemoteClient(), util.KarmadaAPIServerName(data.GetName()), data.GetNamespace()) if err != nil { @@ -78,6 +77,21 @@ func runUploadAdminKubeconfig(r workflow.RunData) error { } nodePort := getNodePortFromAPIServerService(service) endpoint = fmt.Sprintf("https://%s:%d", data.ControlplaneAddress(), nodePort) + case corev1.ServiceTypeLoadBalancer: + service, err := apiclient.GetService(data.RemoteClient(), util.KarmadaAPIServerName(data.GetName()), data.GetNamespace()) + if err != nil { + return err + } + if len(service.Status.LoadBalancer.Ingress) == 0 { + return fmt.Errorf("no loadbalancer ingress found in service (%s/%s)", data.GetName(), data.GetNamespace()) + } + loadbalancerAddress := getLoadbalancerAddress(service.Status.LoadBalancer.Ingress) + if loadbalancerAddress == "" { + return fmt.Errorf("can not find loadbalancer ip or hostname in service (%s/%s)", data.GetName(), data.GetNamespace()) + } + endpoint = fmt.Sprintf("https://%s:%d", loadbalancerAddress, constants.KarmadaAPIserverListenClientPort) + default: + return errors.New("not supported service type for Karmada API server") } kubeconfig, err := buildKubeConfigFromSpec(data, endpoint) @@ -127,6 +141,17 @@ func getNodePortFromAPIServerService(service *corev1.Service) int32 { return nodePort } +func getLoadbalancerAddress(ingress []corev1.LoadBalancerIngress) string { + for _, in := range ingress { + if in.Hostname != "" { + return in.Hostname + } else if in.IP != "" { + return in.IP + } + } + return "" +} + func buildKubeConfigFromSpec(data InitData, serverURL string) (*clientcmdapi.Config, error) { ca := data.GetCert(constants.CaCertAndKeyName) if ca == nil { diff --git a/operator/pkg/tasks/init/wait.go b/operator/pkg/tasks/init/wait.go index e6c2ba836bf5..dc38c882df07 100644 --- a/operator/pkg/tasks/init/wait.go +++ b/operator/pkg/tasks/init/wait.go @@ -34,6 +34,11 @@ var ( // It includes the time for pulling the component image. componentBeReadyTimeout = 120 * time.Second + // failureThreshold represents the maximum number of retries allowed for + // waiting for a component to be ready. If the threshold is exceeded, + // the process will stop and return an error. 
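+ // It is declared as a variable rather than a constant so it can be overridden; the wait tests lower it to exercise the failure path quickly.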
+ failureThreshold = 3 + etcdLabels = labels.Set{"karmada-app": constants.Etcd} karmadaApiserverLabels = labels.Set{"karmada-app": constants.KarmadaAPIServer} karmadaAggregatedAPIServerLabels = labels.Set{"karmada-app": constants.KarmadaAggregatedAPIServer} @@ -62,7 +67,7 @@ func runWaitApiserver(r workflow.RunData) error { waiter := apiclient.NewKarmadaWaiter(data.ControlplaneConfig(), data.RemoteClient(), componentBeReadyTimeout) // check whether the karmada apiserver is health. - if err := apiclient.TryRunCommand(waiter.WaitForAPI, 3); err != nil { + if err := apiclient.TryRunCommand(waiter.WaitForAPI, failureThreshold); err != nil { return fmt.Errorf("the karmada apiserver is unhealthy, err: %w", err) } klog.V(2).InfoS("[check-apiserver-health] the etcd and karmada-apiserver is healthy", "karmada", klog.KObj(data)) diff --git a/operator/pkg/tasks/init/wait_test.go b/operator/pkg/tasks/init/wait_test.go new file mode 100644 index 000000000000..a40bcf208b05 --- /dev/null +++ b/operator/pkg/tasks/init/wait_test.go @@ -0,0 +1,292 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tasks + +import ( + "fmt" + "net/http" + "strings" + "testing" + "time" + + "k8s.io/apimachinery/pkg/runtime" + fakeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + fakerest "k8s.io/client-go/rest/fake" + + "github.com/karmada-io/karmada/operator/pkg/util" + "github.com/karmada-io/karmada/operator/pkg/util/apiclient" + "github.com/karmada-io/karmada/operator/pkg/workflow" +) + +func TestNewCheckApiserverHealthTask(t *testing.T) { + tests := []struct { + name string + wantTask *workflow.Task + }{ + { + name: "NewCheckApiserverHealthTask", + wantTask: &workflow.Task{ + Name: "check-apiserver-health", + Run: runWaitApiserver, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + checkAPIServerHealthTask := NewCheckApiserverHealthTask() + if err := util.DeepEqualTasks(checkAPIServerHealthTask, *test.wantTask); err != nil { + t.Errorf("unexpected error, got: %v", err) + } + }) + } +} + +func TestRunWaitAPIServer(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitAPIServer_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "check-apiserver-health task invoked with an invalid data struct", + }, + { + name: "RunWaitAPIServer_WaitingForAPIServerHealthyStatus_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: &apiclient.MockK8SRESTClient{ + RESTClientConnector: &fakerest.RESTClient{ + NegotiatedSerializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{}), + Client: fakerest.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("unexpected error, endpoint %s does not exist", req.URL.Path) + }), + }, + }, + 
ControlplaneConfigREST: &rest.Config{}, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout, failureThreshold = time.Second, 1 + return nil + }, + wantErr: true, + errMsg: "the karmada apiserver is unhealthy", + }, + { + name: "RunWaitAPIServer_WaitingForAPIServerHealthyStatus_APIServerIsHealthy", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: &apiclient.MockK8SRESTClient{ + RESTClientConnector: &fakerest.RESTClient{ + NegotiatedSerializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{}), + Client: fakerest.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.URL.Path == "/healthz" { + // Return a fake 200 OK response. + return &http.Response{ + StatusCode: http.StatusOK, + Body: http.NoBody, + }, nil + } + return nil, fmt.Errorf("unexpected error, endpoint %s does not exist", req.URL.Path) + }), + }, + }, + ControlplaneConfigREST: &rest.Config{}, + }, + prep: func(workflow.RunData) error { return nil }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for APIServer: %v", err) + } + err := runWaitApiserver(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestNewWaitControlPlaneTask(t *testing.T) { + tests := []struct { + name string + wantTask *workflow.Task + }{ + { + name: "NewWaitControlPlaneTask", + wantTask: &workflow.Task{ + Name: "wait-controlPlane", + Run: runWaitControlPlane, + RunSubTasks: true, + Tasks: []workflow.Task{ + newWaitControlPlaneSubTask("KubeControllerManager", kubeControllerManagerLabels), + newWaitControlPlaneSubTask("KarmadaControllerManager", karmadaControllerManagerLabels), + newWaitControlPlaneSubTask("KarmadaScheduler", karmadaSchedulerLabels), + newWaitControlPlaneSubTask("KarmadaWebhook", karmadaWebhookLabels), + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + waitControlPlaneTask := NewWaitControlPlaneTask() + if err := util.DeepEqualTasks(waitControlPlaneTask, *test.wantTask); err != nil { + t.Errorf("unexpected error, got: %v", err) + } + }) + } +} + +func TestRunWaitControlPlane(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitControlPlane_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "wait-controlPlane task invoked with an invalid data struct", + }, + { + name: "RunWaitControlPlane_ValidTypeAssertion_TypeAssertionSucceeded", + runData: &TestInitData{ + Name: "test-karmada", + Namespace: "test", + }, + prep: func(workflow.RunData) error { return nil }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting control plane: %v", err) + } + err := runWaitControlPlane(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected 
error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +func TestRunWaitControlPlaneSubTask(t *testing.T) { + tests := []struct { + name string + runData workflow.RunData + prep func(workflow.RunData) error + wantErr bool + errMsg string + }{ + { + name: "RunWaitControlPlaneSubTask_InvalidTypeAssertion_TypeAssertionFailed", + runData: &MyTestData{Data: "test"}, + prep: func(workflow.RunData) error { return nil }, + wantErr: true, + errMsg: "wait-controlPlane task invoked with an invalid data struct", + }, + { + name: "RunWaitControlPlaneSubTask_WaitingForSomeKarmadaControllerManagerPods_Timeout", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + }, + prep: func(workflow.RunData) error { + componentBeReadyTimeout = time.Second + return nil + }, + wantErr: true, + errMsg: "waiting for karmada-demo-controller-manager to ready timeout", + }, + { + name: "RunWaitControlPlaneSubTask_WaitingForSomeKarmadaControllerManagerPods_KarmadaControllerManagerIsReady", + runData: &TestInitData{ + Name: "karmada-demo", + Namespace: "test", + RemoteClientConnector: fakeclientset.NewSimpleClientset(), + ControlplaneConfigREST: &rest.Config{}, + }, + prep: func(rd workflow.RunData) error { + data := rd.(*TestInitData) + if _, err := apiclient.CreatePods(data.RemoteClient(), data.GetNamespace(), util.KarmadaControllerManagerName(data.GetName()), 3, karmadaControllerManagerLabels, true); err != nil { + return fmt.Errorf("failed to create pods: %v", err) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.runData); err != nil { + t.Errorf("failed to prep waiting for Karmada Controller Manager: %v", err) + } + karmadaControllerManagerName := getKarmadaControllerManagerName(test.runData) + waitForKarmadaControllerManager := runWaitControlPlaneSubTask(karmadaControllerManagerName, karmadaControllerManagerLabels) + err := waitForKarmadaControllerManager(test.runData) + if err == nil && test.wantErr { + t.Errorf("expected error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + }) + } +} + +// getKarmadaControllerManagerName returns the Karmada controller manager name from the provided RunData. +// It checks if RunData is *TestInitData, otherwise retrieves it from *MyTestData. 
+func getKarmadaControllerManagerName(rd workflow.RunData) string { + data, ok := rd.(*TestInitData) + if ok { + return util.KarmadaControllerManagerName(data.GetName()) + } + return rd.(*MyTestData).Data +} diff --git a/operator/pkg/util/apiclient/idempotency.go b/operator/pkg/util/apiclient/idempotency.go index 81f28a6ca697..6da6c689fda5 100644 --- a/operator/pkg/util/apiclient/idempotency.go +++ b/operator/pkg/util/apiclient/idempotency.go @@ -49,7 +49,7 @@ func NewCRDsClient(c *rest.Config) (*crdsclient.Clientset, error) { } // NewAPIRegistrationClient is to create an apiregistration ClientSet -func NewAPIRegistrationClient(c *rest.Config) (*aggregator.Clientset, error) { +func NewAPIRegistrationClient(c *rest.Config) (aggregator.Interface, error) { return aggregator.NewForConfig(c) } @@ -183,7 +183,7 @@ func CreateOrUpdateValidatingWebhookConfiguration(client clientset.Interface, vw } // CreateOrUpdateAPIService creates a APIService if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. -func CreateOrUpdateAPIService(apiRegistrationClient *aggregator.Clientset, apiservice *apiregistrationv1.APIService) error { +func CreateOrUpdateAPIService(apiRegistrationClient aggregator.Interface, apiservice *apiregistrationv1.APIService) error { _, err := apiRegistrationClient.ApiregistrationV1().APIServices().Create(context.TODO(), apiservice, metav1.CreateOptions{}) if err != nil { if !apierrors.IsAlreadyExists(err) { @@ -281,6 +281,32 @@ func CreateOrUpdateClusterRole(client clientset.Interface, clusterrole *rbacv1.C return nil } +// CreateOrUpdateClusterRoleBinding creates a Clusterrolebinding if the target resource doesn't exist. +// If the resource exists already, this function will update the resource instead. +func CreateOrUpdateClusterRoleBinding(client clientset.Interface, clusterrolebinding *rbacv1.ClusterRoleBinding) error { + _, err := client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterrolebinding, metav1.CreateOptions{}) + + if err != nil { + if !apierrors.IsAlreadyExists(err) { + return err + } + + older, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterrolebinding.GetName(), metav1.GetOptions{}) + if err != nil { + return err + } + + clusterrolebinding.ResourceVersion = older.ResourceVersion + _, err = client.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterrolebinding, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + + klog.V(4).InfoS("Successfully created or updated clusterrolebinding", "clusterrolebinding", clusterrolebinding.GetName()) + return nil +} + // DeleteDeploymentIfHasLabels deletes a Deployment that exists the given labels. func DeleteDeploymentIfHasLabels(client clientset.Interface, name, namespace string, ls labels.Set) error { deployment, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) diff --git a/operator/pkg/util/apiclient/test_helpers.go b/operator/pkg/util/apiclient/test_helpers.go new file mode 100644 index 000000000000..efa566ca20e4 --- /dev/null +++ b/operator/pkg/util/apiclient/test_helpers.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// MockK8SRESTClient is a struct that implements clientset.Interface. +type MockK8SRESTClient struct { + clientset.Interface + RESTClientConnector rest.Interface +} + +// Discovery returns a mocked discovery interface. +func (m *MockK8SRESTClient) Discovery() discovery.DiscoveryInterface { + return &MockDiscovery{ + RESTClientConnector: m.RESTClientConnector, + } +} + +// MockDiscovery is a mock implementation of DiscoveryInterface. +type MockDiscovery struct { + discovery.DiscoveryInterface + RESTClientConnector rest.Interface +} + +// RESTClient returns a restClientConnector that is used to communicate with API server +// by this client implementation. +func (m *MockDiscovery) RESTClient() rest.Interface { + return m.RESTClientConnector +} + +// CreatePods creates a specified number of pods in the given namespace +// with the provided component name and optional labels. It uses a +// Kubernetes client to interact with the API and can mark the pods as +// running if the `markRunningState` flag is set. +// +// Parameters: +// - client: Kubernetes client interface for API requests. +// - namespace: Namespace for pod creation. +// - componentName: Base name for the pods and their containers. +// - replicaCount: Number of pods to create. +// - labels: Labels to apply to the pods. +// - markRunningState: If true, updates the pods' status to running. +// +// Returns: +// - A slice of pointers to corev1.Pod representing the created pods. +// - An error if pod creation fails. +func CreatePods(client clientset.Interface, namespace string, componentName string, replicaCount int32, labels map[string]string, markRunningState bool) ([]*corev1.Pod, error) { + pods := make([]*corev1.Pod, 0, replicaCount) + for i := int32(0); i < replicaCount; i++ { + podName := fmt.Sprintf("%s-pod-%d", componentName, i) + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: podName, + Namespace: namespace, + Labels: labels, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: fmt.Sprintf("my-%s-container-%d", componentName, i), + Image: fmt.Sprintf("my-%s-image:latest", componentName), + Ports: []corev1.ContainerPort{{ContainerPort: 80}}, + }}, + }, + } + _, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create pod %s: %w", podName, err) + } + + // Mark the pod as in running state if flag is set. + if markRunningState { + if err := UpdatePodStatus(client, pod); err != nil { + return nil, fmt.Errorf("failed to update pod status, got err: %v", err) + } + } + pods = append(pods, pod) + } + return pods, nil +} + +// UpdatePodStatus updates the status of a pod to PodRunning and sets the PodReady condition. +func UpdatePodStatus(client clientset.Interface, pod *corev1.Pod) error { + // Mark the pod as in running state. 
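+ // The fake clientset does not run a kubelet, so the Running phase and Ready condition must be set explicitly for waiters that poll pod readiness.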
+ pod.Status = corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + } + + // Update the pod status in the Kubernetes cluster. + _, err := client.CoreV1().Pods(pod.GetNamespace()).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update status of the pod %s: %w", pod.GetName(), err) + } + + return nil +} diff --git a/operator/pkg/util/apiclient/wait.go b/operator/pkg/util/apiclient/wait.go index 92eddeb447f7..1c55cb79851d 100644 --- a/operator/pkg/util/apiclient/wait.go +++ b/operator/pkg/util/apiclient/wait.go @@ -36,6 +36,18 @@ const ( APICallRetryInterval = 500 * time.Millisecond ) +var ( + // initialBackoffDuration defines the initial duration for the backoff mechanism, + // set to 5 seconds. This value is used to determine the wait time before retrying + // a failed command. + initialBackoffDuration = 5 * time.Second + + // backoffTimeoutFactor is the factor by which the backoff duration is multiplied + // after each failure. In this case, it is set to 2, meaning the wait time will + // double with each consecutive failure. + backoffTimeoutFactor float64 = 2 +) + // Waiter is an interface for waiting for criteria in Karmada to happen type Waiter interface { // WaitForAPI waits for the API Server's /healthz endpoint to become "ok" @@ -79,9 +91,13 @@ func (w *KarmadaWaiter) WaitForAPI() error { }) } +var aggregateClientFromConfigBuilder = func(karmadaConfig *rest.Config) (aggregator.Interface, error) { + return aggregator.NewForConfig(karmadaConfig) +} + // WaitForAPIService waits for the APIService condition to become "true" func (w *KarmadaWaiter) WaitForAPIService(name string) error { - aggregateClient, err := aggregator.NewForConfig(w.karmadaConfig) + aggregateClient, err := aggregateClientFromConfigBuilder(w.karmadaConfig) if err != nil { return err } @@ -162,20 +178,21 @@ func (w *KarmadaWaiter) SetTimeout(timeout time.Duration) { w.timeout = timeout } -// TryRunCommand runs a function a maximum of failureThreshold times, and retries on error. If failureThreshold is hit; the last error is returned +// TryRunCommand runs a function a maximum of failureThreshold times, and +// retries on error. If failureThreshold is hit; the last error is returned. func TryRunCommand(f func() error, failureThreshold int) error { backoff := wait.Backoff{ - Duration: 5 * time.Second, - Factor: 2, // double the timeout for every failure + Duration: initialBackoffDuration, + Factor: backoffTimeoutFactor, Steps: failureThreshold, } return wait.ExponentialBackoff(backoff, func() (bool, error) { err := f() if err != nil { - // Retry until the timeout + // Retry until the timeout. return false, nil } - // The last f() call was a success, return cleanly + // The last f() call was a success, return cleanly. return true, nil }) } diff --git a/operator/pkg/util/apiclient/wait_test.go b/operator/pkg/util/apiclient/wait_test.go new file mode 100644 index 000000000000..c1d7a32cf565 --- /dev/null +++ b/operator/pkg/util/apiclient/wait_test.go @@ -0,0 +1,446 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiclient + +import ( + "context" + "errors" + "fmt" + "net/http" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/rest" + fakerest "k8s.io/client-go/rest/fake" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + fakeAggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" + + "github.com/karmada-io/karmada/operator/pkg/constants" +) + +func TestWaitForAPI(t *testing.T) { + tests := []struct { + name string + karmadaWriter *KarmadaWaiter + wantErr bool + }{ + { + name: "WaitForAPI_WaitingForAPIServerHealthyStatus_Timeout", + karmadaWriter: &KarmadaWaiter{ + karmadaConfig: &rest.Config{}, + client: &MockK8SRESTClient{ + RESTClientConnector: &fakerest.RESTClient{ + NegotiatedSerializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{}), + Client: fakerest.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + return nil, fmt.Errorf("unexpected error, endpoint %s does not exist", req.URL.Path) + }), + }, + }, + timeout: time.Second, + }, + wantErr: true, + }, + { + name: "WaitForAPI_WaitingForAPIServerHealthyStatus_APIServerIsHealthy", + karmadaWriter: &KarmadaWaiter{ + karmadaConfig: &rest.Config{}, + client: &MockK8SRESTClient{ + RESTClientConnector: &fakerest.RESTClient{ + NegotiatedSerializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{}), + Client: fakerest.CreateHTTPClient(func(req *http.Request) (*http.Response, error) { + if req.URL.Path == "/healthz" { + // Return a fake 200 OK response. 
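+								// WaitForAPI only polls /healthz for a successful
+								// status, so an empty body is enough to report the
+								// API server as healthy in this test.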
+								return &http.Response{
+									StatusCode: http.StatusOK,
+									Body:       http.NoBody,
+								}, nil
+							}
+							return nil, fmt.Errorf("unexpected error, endpoint %s does not exist", req.URL.Path)
+						}),
+					},
+				},
+				timeout: time.Millisecond,
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			err := test.karmadaWriter.WaitForAPI()
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestWaitForAPIService(t *testing.T) {
+	name := "karmada-demo-apiservice"
+	tests := []struct {
+		name          string
+		karmadaWriter *KarmadaWaiter
+		apiService    *apiregistrationv1.APIService
+		client        aggregator.Interface
+		prep          func(aggregator.Interface, *apiregistrationv1.APIService) error
+		wantErr       bool
+	}{
+		{
+			name: "WaitForAPIService_WaitingForKarmadaAPIServiceAvailableStatus_Timeout",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				timeout:       time.Second,
+			},
+			apiService: &apiregistrationv1.APIService{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: name,
+				},
+				Spec: apiregistrationv1.APIServiceSpec{
+					Service: &apiregistrationv1.ServiceReference{
+						Name:      "karmada-demo-service",
+						Namespace: "test",
+					},
+					Version: "v1beta1",
+				},
+			},
+			client: fakeAggregator.NewSimpleClientset(),
+			prep: func(client aggregator.Interface, _ *apiregistrationv1.APIService) error {
+				aggregateClientFromConfigBuilder = func(*rest.Config) (aggregator.Interface, error) {
+					return client, nil
+				}
+				return nil
+			},
+			wantErr: true,
+		},
+		{
+			name: "WaitForAPIService_WaitingForKarmadaAPIServiceAvailableStatus_KarmadaAPIServiceIsAvailable",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				timeout:       time.Millisecond,
+			},
+			apiService: &apiregistrationv1.APIService{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: name,
+				},
+				Spec: apiregistrationv1.APIServiceSpec{
+					Service: &apiregistrationv1.ServiceReference{
+						Name:      "karmada-demo-service",
+						Namespace: "test",
+					},
+					Version: "v1beta1",
+				},
+			},
+			client: fakeAggregator.NewSimpleClientset(),
+			prep: func(client aggregator.Interface, apiService *apiregistrationv1.APIService) error {
+				apiServiceCreated, err := client.ApiregistrationV1().APIServices().Create(context.TODO(), apiService, metav1.CreateOptions{})
+				if err != nil {
+					return fmt.Errorf("failed to create api service %s, got err: %v", apiService.Name, err)
+				}
+				apiServiceCreated.Status = apiregistrationv1.APIServiceStatus{
+					Conditions: []apiregistrationv1.APIServiceCondition{
+						{
+							Type:   apiregistrationv1.Available,
+							Status: apiregistrationv1.ConditionTrue,
+						},
+					},
+				}
+				if _, err = client.ApiregistrationV1().APIServices().Update(context.TODO(), apiServiceCreated, metav1.UpdateOptions{}); err != nil {
+					return fmt.Errorf("failed to update api service with available status, got err: %v", err)
+				}
+				aggregateClientFromConfigBuilder = func(*rest.Config) (aggregator.Interface, error) {
+					return client, nil
+				}
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.client, test.apiService); err != nil {
+				t.Errorf("failed to prep waiting for Karmada API Service, got err: %v", err)
+			}
+			err := test.karmadaWriter.WaitForAPIService(name)
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestWaitForPods(t *testing.T) {
+	name, namespace := "karmada-demo-apiserver", "test"
+	karmadaAPIServerLabels := labels.Set{"karmada-app": constants.KarmadaAPIServer}
+	var replicas int32 = 2
+	tests := []struct {
+		name          string
+		karmadaWriter *KarmadaWaiter
+		prep          func(client clientset.Interface) error
+		wantErr       bool
+	}{
+		{
+			name: "WaitForPods_WaitingForAllKarmadaAPIServerPods_Timeout",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				client:        fakeclientset.NewSimpleClientset(),
+				timeout:       time.Second,
+			},
+			prep: func(client clientset.Interface) error {
+				_, err := CreatePods(client, namespace, name, replicas, karmadaAPIServerLabels, false)
+				if err != nil {
+					return fmt.Errorf("failed to create pods, got err: %v", err)
+				}
+				return nil
+			},
+			wantErr: true,
+		},
+		{
+			name: "WaitForPods_WaitingForAllKarmadaAPIServerPods_AllAreUpAndRunning",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				client:        fakeclientset.NewSimpleClientset(),
+				timeout:       time.Second * 2,
+			},
+			prep: func(client clientset.Interface) error {
+				pods, err := CreatePods(client, namespace, name, replicas, karmadaAPIServerLabels, false)
+				if err != nil {
+					return fmt.Errorf("failed to create pods, got err: %v", err)
+				}
+				time.AfterFunc(time.Second, func() {
+					for _, pod := range pods {
+						if err := UpdatePodStatus(client, pod); err != nil {
+							fmt.Printf("failed to update pod status, got err: %v", err)
+							return
+						}
+					}
+				})
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.karmadaWriter.client); err != nil {
+				t.Errorf("failed to prep before waiting for all Karmada APIServer pods, got err: %v", err)
+			}
+			err := test.karmadaWriter.WaitForPods(karmadaAPIServerLabels.String(), namespace)
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestWaitForSomePods(t *testing.T) {
+	name, namespace := "karmada-demo-apiserver", "test"
+	karmadaAPIServerLabels := labels.Set{"karmada-app": constants.KarmadaAPIServer}
+	var replicas int32 = 2
+	tests := []struct {
+		name          string
+		karmadaWriter *KarmadaWaiter
+		prep          func(client clientset.Interface) error
+		wantErr       bool
+	}{
+		{
+			name: "WaitForSomePods_WaitingForSomeKarmadaAPIServerPods_Timeout",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				client:        fakeclientset.NewSimpleClientset(),
+				timeout:       time.Second,
+			},
+			prep: func(client clientset.Interface) error {
+				_, err := CreatePods(client, namespace, name, replicas, karmadaAPIServerLabels, false)
+				if err != nil {
+					return fmt.Errorf("failed to create pods, got err: %v", err)
+				}
+				return nil
+			},
+			wantErr: true,
+		},
+		{
+			name: "WaitForSomePods_WaitingForSomeKarmadaAPIServerPods_SomeAreUpAndRunning",
+			karmadaWriter: &KarmadaWaiter{
+				karmadaConfig: &rest.Config{},
+				client:        fakeclientset.NewSimpleClientset(),
+				timeout:       time.Millisecond,
+			},
+			prep: func(client clientset.Interface) error {
+				pods, err := CreatePods(client, namespace, name, replicas, karmadaAPIServerLabels, false)
+				if err != nil {
+					return fmt.Errorf("failed to create pods, got err: %v", err)
+				}
+				for _, pod := range pods[:1] {
+					if err := UpdatePodStatus(client, pod); err != nil {
+						return fmt.Errorf("failed to update pod status, got err: %v", err)
+					}
+				}
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.karmadaWriter.client); err != nil {
+				t.Errorf("failed to prep before waiting for some Karmada APIServer pods, got err: %v", err)
+			}
+			err := test.karmadaWriter.WaitForSomePods(karmadaAPIServerLabels.String(), namespace, 1)
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestTryRunCommand(t *testing.T) {
+	tests := []struct {
+		name             string
+		failureThreshold int
+		targetFunc       func() error
+		prep             func() error
+		wantErr          bool
+	}{
+		{
+			name:             "TryRunCommand_HitTheFailureThreshold_CommandTimedOut",
+			failureThreshold: 2,
+			targetFunc: func() error {
+				return errors.New("unexpected error")
+			},
+			prep: func() error {
+				initialBackoffDuration = time.Millisecond
+				return nil
+			},
+			wantErr: true,
+		},
+		{
+			name:             "TryRunCommand_BelowFailureThreshold_CommandRunSuccessfully",
+			failureThreshold: 2,
+			targetFunc:       func() error { return nil },
+			prep: func() error {
+				initialBackoffDuration = time.Millisecond
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(); err != nil {
+				t.Errorf("failed to prep before trying to run the command, got err: %v", err)
+			}
+			err := TryRunCommand(test.targetFunc, test.failureThreshold)
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+		})
+	}
+}
+
+func TestIsPodRunning(t *testing.T) {
+	tests := []struct {
+		name string
+		pod  *corev1.Pod
+		want bool
+	}{
+		{
+			name: "IsPodRunning_PodInPendingState_PodIsNotRunningYet",
+			pod: &corev1.Pod{
+				Status: corev1.PodStatus{
+					Phase: corev1.PodPending,
+				},
+			},
+			want: false,
+		},
+		{
+			name: "IsPodRunning_WithDeletionTimestamp_PodIsNotRunningYet",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &metav1.Time{Time: time.Now()},
+				},
+				Status: corev1.PodStatus{
+					Phase: corev1.PodRunning,
+				},
+			},
+			want: false,
+		},
+		{
+			name: "IsPodRunning_PodReadyConditionReadinessIsFalse_PodIsNotRunningYet",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: nil,
+				},
+				Status: corev1.PodStatus{
+					Phase: corev1.PodRunning,
+					Conditions: []corev1.PodCondition{
+						{
+							Type:   corev1.PodReady,
+							Status: corev1.ConditionFalse,
+						},
+					},
+				},
+			},
+			want: false,
+		},
+		{
+			name: "IsPodRunning_PodSatisfyAllRunningConditions_PodIsAlreadyRunning",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: nil,
+				},
+				Status: corev1.PodStatus{
+					Phase: corev1.PodRunning,
+					Conditions: []corev1.PodCondition{
+						{
+							Type:   corev1.PodReady,
+							Status: corev1.ConditionTrue,
+						},
+					},
+				},
+			},
+			want: true,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if got := isPodRunning(*test.pod); got != test.want {
+				t.Errorf("expected pod running status %t, but got %t", test.want, got)
+			}
+		})
+	}
+}
diff --git a/operator/pkg/util/endpoint_test.go b/operator/pkg/util/endpoint_test.go
new file mode 100644
index 000000000000..0a12c24a1a08
--- /dev/null
+++ b/operator/pkg/util/endpoint_test.go
@@ -0,0 +1,182 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "fmt" + "strings" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" +) + +func TestGetControlPlaneEndpoint(t *testing.T) { + tests := []struct { + name string + address string + port string + wantErr bool + wantEndpoint string + }{ + { + name: "GetControlplaneEndpoint_InvalidAddress_AddressIsInvalid", + address: "192.168:1:1", + port: "6060", + wantErr: true, + }, + { + name: "GetControlplaneEndpoint_ParseEndpoint_EndpointParsed", + address: "192.168.1.1", + port: "6060", + wantErr: false, + wantEndpoint: "https://192.168.1.1:6060", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + controlPlaneEndpoint, err := GetControlplaneEndpoint(test.address, test.port) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if controlPlaneEndpoint != test.wantEndpoint { + t.Errorf("expected endpoint %s, but got %s", test.wantEndpoint, controlPlaneEndpoint) + } + }) + } +} + +func TestGetAPIServiceIP(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + prep func(clientset.Interface) error + wantAPIServiceIP string + wantErr bool + errMsg string + }{ + { + name: "GetAPIServiceIP_WithNoNodesInTheCluster_FailedToGetAPIServiceIP", + client: fakeclientset.NewSimpleClientset(), + prep: func(clientset.Interface) error { return nil }, + wantErr: true, + errMsg: "there are no nodes in cluster", + }, + { + name: "GetAPIServiceIP_WithoutMasterNode_WorkerNodeAPIServiceIPReturned", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + nodes := []*corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.2", + }, + }, + }, + }, + } + for _, node := range nodes { + _, err := client.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create node %s: %v", node.Name, err) + } + } + return nil + }, + wantErr: false, + wantAPIServiceIP: "192.168.1.2", + }, + { + name: "GetAPIServiceIP_WithMasterNode_MasterNodeAPIServiceIPReturned", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + nodes := []*corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Labels: map[string]string{ + "node-role.kubernetes.io/master": "", + }, + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.1", + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-2", + }, + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + { + Type: corev1.NodeInternalIP, + Address: "192.168.1.2", + }, + }, + }, + }, + } + for _, node := range nodes { + _, err := client.CoreV1().Nodes().Create(context.TODO(), node, 
metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create node %s: %v", node.Name, err) + } + } + return nil + }, + wantErr: false, + wantAPIServiceIP: "192.168.1.1", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Errorf("failed to prep before getting API service IP: %v", err) + } + apiServiceIP, err := GetAPIServiceIP(test.client) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if apiServiceIP != test.wantAPIServiceIP { + t.Errorf("expected API service IP %s, but got %s", test.wantAPIServiceIP, apiServiceIP) + } + }) + } +} diff --git a/operator/pkg/util/kubeconfig.go b/operator/pkg/util/kubeconfig.go index 6fb65b7adade..792fe89c3bf8 100644 --- a/operator/pkg/util/kubeconfig.go +++ b/operator/pkg/util/kubeconfig.go @@ -61,13 +61,18 @@ func CreateBasic(serverURL, clusterName, userName string, caCert []byte) *client } } -// IsInCluster returns a bool represents whether the remote cluster is the local or not. +// IsInCluster checks if the specified host cluster is the local cluster. +// It returns true if: +// - the hostCluster is nil; +// - or its SecretRef is nil; +// - or the SecretRef's Name is an empty string. +// This indicates that the remote cluster is either not configured or not identifiable as the local cluster. func IsInCluster(hostCluster *operatorv1alpha1.HostCluster) bool { return hostCluster == nil || hostCluster.SecretRef == nil || len(hostCluster.SecretRef.Name) == 0 } // BuildClientFromSecretRef builds a clientset from the secret reference. -func BuildClientFromSecretRef(client *clientset.Clientset, ref *operatorv1alpha1.LocalSecretReference) (*clientset.Clientset, error) { +func BuildClientFromSecretRef(client clientset.Interface, ref *operatorv1alpha1.LocalSecretReference) (clientset.Interface, error) { secret, err := client.CoreV1().Secrets(ref.Namespace).Get(context.TODO(), ref.Name, metav1.GetOptions{}) if err != nil { return nil, err @@ -81,7 +86,7 @@ func BuildClientFromSecretRef(client *clientset.Clientset, ref *operatorv1alpha1 return newClientSetForConfig(kubeconfigBytes) } -func newClientSetForConfig(kubeconfig []byte) (*clientset.Clientset, error) { +func newClientSetForConfig(kubeconfig []byte) (clientset.Interface, error) { clientConfig, err := clientcmd.NewClientConfigFromBytes(kubeconfig) if err != nil { return nil, err diff --git a/operator/pkg/util/kubeconfig_test.go b/operator/pkg/util/kubeconfig_test.go new file mode 100644 index 000000000000..d2f914c6ac9f --- /dev/null +++ b/operator/pkg/util/kubeconfig_test.go @@ -0,0 +1,252 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "math/big" + "strings" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" +) + +func TestBuildClientFromSecretRef(t *testing.T) { + name, namespace := "test-secret", "test" + token := "my-sample-token" + kubeconfig := ` +apiVersion: v1 +clusters: +- cluster: + server: https://127.0.0.1:6443 + certificate-authority-data: %s + name: test-cluster +contexts: +- context: + cluster: test-cluster + user: test-user + name: test-context +current-context: test-context +kind: Config +preferences: {} +users: +- name: test-user + user: + token: %s +` + tests := []struct { + name string + client clientset.Interface + ref *operatorv1alpha1.LocalSecretReference + wantErr bool + prep func(clientset.Interface) error + errMsg string + }{ + { + name: "BuildClientFromSecretRef_GotNetworkIssue_FailedToBuildClient", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + client.(*fakeclientset.Clientset).Fake.PrependReactor("get", "secrets", func(coretesting.Action) (bool, runtime.Object, error) { + return true, nil, errors.New("unexpected error: encountered a network issue while getting the secrets") + }) + return nil + }, + wantErr: true, + errMsg: "unexpected error: encountered a network issue while getting the secrets", + }, + { + name: "BuildClientFromSecretRef_WithoutKubeConfig_KubeConfigIsNotFound", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{}, + } + _, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create secret %s in %s namespace, got err: %v", name, namespace, err) + } + return nil + }, + wantErr: true, + errMsg: "the kubeconfig or data key 'kubeconfig' is not found", + }, + { + name: "BuildClientFromSecretRef_WithKubeconfig_ClientIsBuilt", + client: fakeclientset.NewSimpleClientset(), + ref: &operatorv1alpha1.LocalSecretReference{ + Name: name, + Namespace: namespace, + }, + prep: func(client clientset.Interface) error { + // Generate kubeconfig bytes. + caCert, err := generateTestCACertificate() + if err != nil { + return fmt.Errorf("failed to generate CA certificate: %v", err) + } + base64CACert := base64.StdEncoding.EncodeToString([]byte(caCert)) + base64Token := base64.StdEncoding.EncodeToString([]byte(token)) + kubeconfigBytes := []byte(fmt.Sprintf(kubeconfig, base64CACert, base64Token)) + + // Create secret with kubeconfig data. 
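+				// BuildClientFromSecretRef reads the config from the data key
+				// "kubeconfig"; the previous case covers the missing-key path.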
+				secret := &corev1.Secret{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      name,
+						Namespace: namespace,
+					},
+					Data: map[string][]byte{"kubeconfig": kubeconfigBytes},
+				}
+				_, err = client.CoreV1().Secrets(namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
+				if err != nil {
+					return fmt.Errorf("failed to create secret %s in %s namespace, got err: %v", name, namespace, err)
+				}
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.client); err != nil {
+				t.Errorf("failed to prep before building client from secret ref: %v", err)
+			}
+			_, err := BuildClientFromSecretRef(test.client, test.ref)
+			if err == nil && test.wantErr {
+				t.Errorf("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+		})
+	}
+}
+
+func TestIsInCluster(t *testing.T) {
+	tests := []struct {
+		name        string
+		hostCluster *operatorv1alpha1.HostCluster
+		want        bool
+	}{
+		{
+			name:        "IsInCluster_WithoutHostCluster_ItIsLocal",
+			hostCluster: nil,
+			want:        true,
+		},
+		{
+			name: "IsInCluster_WithoutSecretRef_ItIsLocal",
+			hostCluster: &operatorv1alpha1.HostCluster{
+				SecretRef: nil,
+			},
+			want: true,
+		},
+		{
+			name: "IsInCluster_WithoutSecretRefName_ItIsLocal",
+			hostCluster: &operatorv1alpha1.HostCluster{
+				SecretRef: &operatorv1alpha1.LocalSecretReference{
+					Name: "",
+				},
+			},
+			want: true,
+		},
+		{
+			name: "IsInCluster_WithAllRemoteClusterConfigurations_ItIsRemote",
+			hostCluster: &operatorv1alpha1.HostCluster{
+				SecretRef: &operatorv1alpha1.LocalSecretReference{
+					Name:      "remote-secret",
+					Namespace: "test",
+				},
+			},
+			want: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if got := IsInCluster(test.hostCluster); got != test.want {
+				t.Errorf("expected host cluster local status to be %t, but got %t", test.want, got)
+			}
+		})
+	}
+}
+
+// generateTestCACertificate returns a self-signed CA certificate as a PEM string.
+func generateTestCACertificate() (string, error) {
+	// Generate a new RSA private key.
+	priv, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		return "", err
+	}
+
+	// Set the certificate parameters.
+	notBefore := time.Now()
+	notAfter := notBefore.Add(365 * 24 * time.Hour)
+
+	serialNumber, err := rand.Int(rand.Reader, big.NewInt(1<<62))
+	if err != nil {
+		return "", err
+	}
+
+	cert := &x509.Certificate{
+		SerialNumber:          serialNumber,
+		NotBefore:             notBefore,
+		NotAfter:              notAfter,
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
+		BasicConstraintsValid: true,
+		IsCA:                  true,
+	}
+
+	// Create the certificate.
+	certDER, err := x509.CreateCertificate(rand.Reader, cert, cert, &priv.PublicKey, priv)
+	if err != nil {
+		return "", err
+	}
+
+	// PEM encode the certificate.
+	certPEM := &pem.Block{Type: "CERTIFICATE", Bytes: certDER}
+	certPEMData := pem.EncodeToMemory(certPEM)
+
+	return string(certPEMData), nil
+}
diff --git a/operator/pkg/util/naming_test.go b/operator/pkg/util/naming_test.go
new file mode 100644
index 000000000000..42aafb4727a8
--- /dev/null
+++ b/operator/pkg/util/naming_test.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2024 The Karmada Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import "testing" + +func TestGenerateResourceName(t *testing.T) { + tests := []struct { + name string + component string + suffix string + want string + }{ + { + name: "GenerateResourceName_WithKarmada_SuffixImmediatelyAfter", + component: "karmada-demo", + suffix: "search", + want: "karmada-demo-search", + }, + { + name: "GenerateResourceName_WithoutKarmada_KarmadaInTheMiddle", + component: "test-demo", + suffix: "search", + want: "test-demo-karmada-search", + }, + } + for _, test := range tests { + if got := generateResourceName(test.component, test.suffix); got != test.want { + t.Errorf("expected resource name generated to be %s, but got %s", test.want, got) + } + } +} diff --git a/operator/pkg/util/patcher/pather.go b/operator/pkg/util/patcher/pather.go index b6211576c4f1..9c883c32aaee 100644 --- a/operator/pkg/util/patcher/pather.go +++ b/operator/pkg/util/patcher/pather.go @@ -35,12 +35,14 @@ import ( // Patcher defines multiple variables that need to be patched. type Patcher struct { - labels map[string]string - annotations map[string]string - extraArgs map[string]string - featureGates map[string]bool - volume *operatorv1alpha1.VolumeData - resources corev1.ResourceRequirements + labels map[string]string + annotations map[string]string + extraArgs map[string]string + extraVolumes []corev1.Volume + extraVolumeMounts []corev1.VolumeMount + featureGates map[string]bool + volume *operatorv1alpha1.VolumeData + resources corev1.ResourceRequirements } // NewPatcher returns a patcher. @@ -66,6 +68,18 @@ func (p *Patcher) WithExtraArgs(extraArgs map[string]string) *Patcher { return p } +// WithExtraVolumes sets extra volumes for the patcher. +func (p *Patcher) WithExtraVolumes(extraVolumes []corev1.Volume) *Patcher { + p.extraVolumes = extraVolumes + return p +} + +// WithExtraVolumeMounts sets extra volume mounts for the patcher. +func (p *Patcher) WithExtraVolumeMounts(extraVolumeMounts []corev1.VolumeMount) *Patcher { + p.extraVolumeMounts = extraVolumeMounts + return p +} + // WithFeatureGates sets featureGates to the patcher. func (p *Patcher) WithFeatureGates(featureGates map[string]bool) *Patcher { p.featureGates = featureGates @@ -122,6 +136,10 @@ func (p *Patcher) ForDeployment(deployment *appsv1.Deployment) { command = append(command, buildArgumentListFromMap(argsMap, overrideArgs)...) deployment.Spec.Template.Spec.Containers[0].Command = command } + // Add extra volumes and volume mounts + // First container in the pod is expected to contain the Karmada component + deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, p.extraVolumes...) + deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[0].VolumeMounts, p.extraVolumeMounts...) } // ForStatefulSet patches the statefulset manifest. 
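A minimal usage sketch of the new extra-volume plumbing (illustrative only: the deploy variable and the volume names below are assumptions, not taken from this patch). The patcher is built fluently and then applied to a component Deployment, which appends the volumes to the pod spec and the mounts to the first container:

	// Sketch: assumes deploy is an existing *appsv1.Deployment whose first
	// container is the Karmada component, and that NewPatcher takes no arguments.
	p := patcher.NewPatcher().
		WithExtraVolumes([]corev1.Volume{{
			Name:         "extra-config",
			VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
		}}).
		WithExtraVolumeMounts([]corev1.VolumeMount{{
			Name:      "extra-config",
			MountPath: "/etc/extra",
		}})
	p.ForDeployment(deploy)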
diff --git a/operator/pkg/util/patcher/pather_test.go b/operator/pkg/util/patcher/pather_test.go new file mode 100644 index 000000000000..6a5f3acb85a1 --- /dev/null +++ b/operator/pkg/util/patcher/pather_test.go @@ -0,0 +1,521 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patcher + +import ( + "reflect" + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/constants" +) + +func TestPatchForDeployment(t *testing.T) { + tests := []struct { + name string + patcher *Patcher + deployment *appsv1.Deployment + want *appsv1.Deployment + }{ + { + name: "PatchForDeployment_WithLabelsAndAnnotations_Patched", + patcher: &Patcher{ + labels: map[string]string{ + "label1": "value1-patched", + }, + annotations: map[string]string{ + "annotation1": "annot1-patched", + }, + }, + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test", + Labels: map[string]string{ + "label1": "value1", + }, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "annotation1": "annot1", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test", + Labels: map[string]string{ + "label1": "value1-patched", + }, + Annotations: map[string]string{ + "annotation1": "annot1-patched", + }, + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "label1": "value1-patched", + }, + Annotations: map[string]string{ + "annotation1": "annot1-patched", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + }, + }, + }, + }, + }, + }, + }, + { + name: "PatchForDeployment_WithResourcesExtraArgsAndFeatureGates_Patched", + patcher: &Patcher{ + extraArgs: map[string]string{ + "some-arg": "some-value", + }, + featureGates: map[string]bool{ + "SomeGate": true, + }, + resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + }, + deployment: &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test", + }, + Spec: appsv1.DeploymentSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + Command: []string{ + "/bin/bash", + "--feature-gates=OldGate=false", + }, + }, + }, + }, + }, + }, + }, + want: &appsv1.Deployment{ + 
ObjectMeta: metav1.ObjectMeta{
+					Name:        "test-deployment",
+					Namespace:   "test",
+					Labels:      map[string]string{},
+					Annotations: map[string]string{},
+				},
+				Spec: appsv1.DeploymentSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{},
+							Annotations: map[string]string{},
+						},
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{
+								{
+									Name:  "test-container",
+									Image: "nginx:latest",
+									Command: []string{
+										"/bin/bash",
+										"--feature-gates=OldGate=false,SomeGate=true",
+										"--some-arg=some-value",
+									},
+									Resources: corev1.ResourceRequirements{
+										Limits: corev1.ResourceList{
+											corev1.ResourceCPU:    resource.MustParse("500m"),
+											corev1.ResourceMemory: resource.MustParse("128Mi"),
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			name: "PatchForDeployment_WithExtraVolumesAndVolumeMounts_Patched",
+			patcher: &Patcher{
+				extraVolumes: []corev1.Volume{
+					{
+						Name: "extra-volume",
+						VolumeSource: corev1.VolumeSource{
+							EmptyDir: &corev1.EmptyDirVolumeSource{},
+						},
+					},
+				},
+				extraVolumeMounts: []corev1.VolumeMount{
+					{
+						Name:      "extra-volume",
+						MountPath: "/extra/path",
+					},
+				},
+			},
+			deployment: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-deployment",
+					Namespace: "test",
+				},
+				Spec: appsv1.DeploymentSpec{
+					Template: corev1.PodTemplateSpec{
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{
+								{
+									Name:  "test-container",
+									Image: "nginx:latest",
+								},
+							},
+						},
+					},
+				},
+			},
+			want: &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "test-deployment",
+					Namespace:   "test",
+					Labels:      map[string]string{},
+					Annotations: map[string]string{},
+				},
+				Spec: appsv1.DeploymentSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels:      map[string]string{},
+							Annotations: map[string]string{},
+						},
+						Spec: corev1.PodSpec{
+							Volumes: []corev1.Volume{
+								{
+									Name: "extra-volume",
+									VolumeSource: corev1.VolumeSource{
+										EmptyDir: &corev1.EmptyDirVolumeSource{},
+									},
+								},
+							},
+							Containers: []corev1.Container{
+								{
+									Name:  "test-container",
+									Image: "nginx:latest",
+									VolumeMounts: []corev1.VolumeMount{
+										{
+											Name:      "extra-volume",
+											MountPath: "/extra/path",
+										},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			test.patcher.ForDeployment(test.deployment)
+			if !reflect.DeepEqual(test.deployment, test.want) {
+				t.Errorf("expected deployment %v, but got %v", test.want, test.deployment)
+			}
+		})
+	}
+}
+
+func TestPatchForStatefulSet(t *testing.T) {
+	tests := []struct {
+		name        string
+		patcher     *Patcher
+		statefulSet *appsv1.StatefulSet
+		want        *appsv1.StatefulSet
+	}{
+		{
+			name: "PatchForStatefulSet_WithLabelsAndAnnotations_Patched",
+			patcher: &Patcher{
+				labels: map[string]string{
+					"label1": "value1-patched",
+				},
+				annotations: map[string]string{
+					"annotation1": "annot1-patched",
+				},
+			},
+			statefulSet: &appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-statefulset",
+					Namespace: "test",
+					Labels: map[string]string{
+						"label1": "value1",
+					},
+				},
+				Spec: appsv1.StatefulSetSpec{
+					Template: corev1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{},
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{
+								{
+									Name:  "nginx",
+									Image: "nginx:latest",
+								},
+							},
+						},
+					},
+				},
+			},
+			want: &appsv1.StatefulSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-statefulset",
+					Namespace: "test",
+					Labels: map[string]string{
+						"label1": "value1-patched",
+					},
+					Annotations: map[string]string{
+						"annotation1":
"annot1-patched", + }, + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "label1": "value1-patched", + }, + Annotations: map[string]string{ + "annotation1": "annot1-patched", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx:latest", + }, + }, + }, + }, + }, + }, + }, + { + name: "PatchForStatefulSet_WithVolumes_Patched", + patcher: &Patcher{ + volume: &v1alpha1.VolumeData{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + Medium: corev1.StorageMediumMemory, + SizeLimit: &resource.Quantity{}, + }, + HostPath: &corev1.HostPathVolumeSource{ + Path: "/tmp", + Type: ptr.To(corev1.HostPathDirectory), + }, + VolumeClaim: &corev1.PersistentVolumeClaimTemplate{ + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1024m"), + }, + }, + }, + }, + }, + }, + statefulSet: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + }, + }, + Volumes: []corev1.Volume{}, + }, + }, + }, + }, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "test", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + }, + }, + Volumes: []corev1.Volume{ + { + Name: constants.EtcdDataVolumeName, + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + { + Name: constants.EtcdDataVolumeName, + VolumeSource: corev1.VolumeSource{ + HostPath: &corev1.HostPathVolumeSource{ + Path: "/tmp", + Type: ptr.To(corev1.HostPathDirectory), + }, + }, + }, + }, + }, + }, + VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: constants.EtcdDataVolumeName, + }, + Spec: corev1.PersistentVolumeClaimSpec{ + AccessModes: []corev1.PersistentVolumeAccessMode{ + corev1.ReadWriteOnce, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceStorage: resource.MustParse("1024m"), + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "PatchForStatefulSet_WithResourcesAndExtraArgs_Patched", + patcher: &Patcher{ + extraArgs: map[string]string{ + "some-arg": "some-value", + }, + resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + }, + statefulSet: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-statefulset", + Namespace: "test", + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + }, + }, + }, + }, + }, + }, + want: &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: "test-statefulset", + Namespace: "test", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: appsv1.StatefulSetSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Image: "nginx:latest", + Command: []string{ + "--some-arg=some-value", + }, + Resources: corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + test.patcher.ForStatefulSet(test.statefulSet) + if !reflect.DeepEqual(test.statefulSet, test.want) { + t.Errorf("unexpected err, expected statefulset %v but got %v", test.statefulSet, test.want) + } + }) + } +} diff --git a/operator/pkg/util/template_test.go b/operator/pkg/util/template_test.go new file mode 100644 index 000000000000..9cbf75d4d689 --- /dev/null +++ b/operator/pkg/util/template_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bytes" + "strings" + "testing" +) + +func TestParseTemplate(t *testing.T) { + tests := []struct { + name string + template string + args interface{} + want []byte + wantErr bool + errMsg string + }{ + { + name: "ParseTemplate_WithMissingVariable_ParsingFailed", + template: "Hello, {{.Name}}!", + args: struct{ Missing string }{Missing: "World"}, + want: nil, + wantErr: true, + errMsg: "error when executing template", + }, + { + name: "ParseTemplate_WithInvalidTemplateSyntax_ParsingFailed", + template: "Hello, {{.Name!", + args: struct{ Name string }{Name: "World"}, + want: nil, + wantErr: true, + errMsg: "error when parsing template", + }, + { + name: "ParseTemplate_ValidTemplateWithVariable_ParsingSucceeded", + template: "Hello, {{.Name}}!", + args: struct{ Name string }{Name: "World"}, + want: []byte("Hello, World!"), + wantErr: false, + }, + { + name: "ParseTemplate_EmptyTemplate_ParsingSucceeded", + template: "", + args: nil, + want: []byte(""), + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + got, err := ParseTemplate(test.template, test.args) + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if !bytes.Equal(got, test.want) { + t.Errorf("expected parsed template bytes to be %v, but got %v", test.want, got) + } + }) + } +} diff --git a/operator/pkg/util/util.go b/operator/pkg/util/util.go index 6214527211c5..aa7205e6f5d6 100644 --- a/operator/pkg/util/util.go +++ b/operator/pkg/util/util.go @@ -25,16 
+25,33 @@ import ( "net/http" "os" "path/filepath" + "reflect" "regexp" "strings" "time" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" "k8s.io/klog/v2" "sigs.k8s.io/yaml" + operatorv1alpha1 "github.com/karmada-io/karmada/operator/pkg/apis/operator/v1alpha1" + "github.com/karmada-io/karmada/operator/pkg/workflow" "github.com/karmada-io/karmada/pkg/util" ) +var ( + // ClientFactory creates a new Kubernetes clientset from the provided kubeconfig. + ClientFactory = func(kubeconfig *rest.Config) (clientset.Interface, error) { + return clientset.NewForConfig(kubeconfig) + } + + // BuildClientFromSecretRefFactory constructs a Kubernetes clientset using a LocalSecretReference. + BuildClientFromSecretRefFactory = func(client clientset.Interface, ref *operatorv1alpha1.LocalSecretReference) (clientset.Interface, error) { + return BuildClientFromSecretRef(client, ref) + } +) + // Downloader Download progress type Downloader struct { io.Reader @@ -58,11 +75,12 @@ func (d *Downloader) Read(p []byte) (n int, err error) { return } +var httpClient = http.Client{ + Timeout: 60 * time.Second, +} + // DownloadFile Download files via URL func DownloadFile(url, filePath string) error { - httpClient := http.Client{ - Timeout: 60 * time.Second, - } resp, err := httpClient.Get(url) if err != nil { return err @@ -70,7 +88,7 @@ func DownloadFile(url, filePath string) error { defer resp.Body.Close() if resp.StatusCode != 200 { - return fmt.Errorf("failed download file. url: %s code: %v", url, resp.StatusCode) + return fmt.Errorf("failed to download file. url: %s code: %v", url, resp.StatusCode) } file, err := os.OpenFile(filePath, os.O_CREATE|os.O_RDWR, util.DefaultFilePerm) @@ -222,3 +240,85 @@ func ReplaceYamlForReg(path, destResource string, reg *regexp.Regexp) ([]byte, e repl := reg.ReplaceAllString(string(data), destResource) return yaml.YAMLToJSON([]byte(repl)) } + +// ContainAllTasks checks if all tasks in the subset are present in the tasks slice. +// Returns an error if any subset task is not found; nil otherwise. +func ContainAllTasks(tasks, subset []workflow.Task) error { + for _, subsetTask := range subset { + found := false + for _, task := range tasks { + found = DeepEqualTasks(task, subsetTask) == nil + if found { + break + } + } + if !found { + return fmt.Errorf("subset task %v not found in tasks", subsetTask) + } + } + return nil +} + +// DeepEqualTasks checks if two workflow.Task instances are deeply equal. +// It returns an error if they differ, or nil if they are equal. +// The comparison includes the task name, RunSubTasks flag, +// and the length and contents of the Tasks slice. +// Function references and behavior are not compared; only the values +// of the specified fields are considered. Any differences are detailed +// in the returned error. 
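+// For example, two tasks that differ only in their Run functions compare as
+// equal, because function values are deliberately ignored:
+//
+//	t1 := workflow.Task{Name: "deploy", Run: runA}
+//	t2 := workflow.Task{Name: "deploy", Run: runB}
+//	err := DeepEqualTasks(t1, t2) // err is nil: names match, no subtasks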
+func DeepEqualTasks(t1, t2 workflow.Task) error { + if t1.Name != t2.Name { + return fmt.Errorf("expected t1 name %s, but got %s", t2.Name, t1.Name) + } + + if t1.RunSubTasks != t2.RunSubTasks { + return fmt.Errorf("expected t1 RunSubTasks flag %t, but got %t", t2.RunSubTasks, t1.RunSubTasks) + } + + if len(t1.Tasks) != len(t2.Tasks) { + return fmt.Errorf("expected t1 tasks length %d, but got %d", len(t2.Tasks), len(t1.Tasks)) + } + + for index := range t1.Tasks { + err := DeepEqualTasks(t1.Tasks[index], t2.Tasks[index]) + if err != nil { + return fmt.Errorf("unexpected error; tasks are not equal at index %d: %v", index, err) + } + } + + return nil +} + +// ContainsAllValues checks if all values in the 'values' slice exist in the 'container' slice or array. +func ContainsAllValues(container interface{}, values interface{}) bool { + // Ensure the provided container is a slice or array. + vContainer := reflect.ValueOf(container) + if vContainer.Kind() != reflect.Slice && vContainer.Kind() != reflect.Array { + return false + } + + // Ensure the provided values are a slice or array. + vValues := reflect.ValueOf(values) + if vValues.Kind() != reflect.Slice && vValues.Kind() != reflect.Array { + return false + } + + // Iterate over the 'values' and ensure each value exists in the container. + for i := 0; i < vValues.Len(); i++ { + value := vValues.Index(i).Interface() + found := false + // Check if this value exists in the container. + for j := 0; j < vContainer.Len(); j++ { + if reflect.DeepEqual(vContainer.Index(j).Interface(), value) { + found = true + break + } + } + // If any value is not found, return false. + if !found { + return false + } + } + // If all values were found, return true. + return true +} diff --git a/operator/pkg/util/util_test.go b/operator/pkg/util/util_test.go index 4758b6268e73..9edc46e0292f 100644 --- a/operator/pkg/util/util_test.go +++ b/operator/pkg/util/util_test.go @@ -17,10 +17,309 @@ limitations under the License. package util import ( + "archive/tar" + "bytes" + "compress/gzip" + "errors" + "fmt" + "io" + "net/http" + "os" + "path/filepath" "strings" "testing" + "time" + + "k8s.io/utils/ptr" ) +// mockReader is a simple io.Reader that returns an error after being called. 
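+// It drains any buffered data first and returns err alongside the copied
+// bytes on every call, which lets tests exercise both the io.EOF and the
+// unexpected-error paths of Downloader.Read.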
+type mockReader struct {
+	data []byte
+	err  error
+}
+
+func (m *mockReader) Read(p []byte) (n int, err error) {
+	if m.data == nil {
+		return 0, m.err
+	}
+	n = copy(p, m.data)
+	m.data = m.data[n:]
+	return n, m.err
+}
+
+type mockRoundTripper struct {
+	response *http.Response
+	err      error
+}
+
+func (m *mockRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
+	if m.err != nil {
+		return nil, m.err
+	}
+	return m.response, nil
+}
+
+func TestRead(t *testing.T) {
+	tests := []struct {
+		name       string
+		downloader *Downloader
+		data       string
+		prep       func(downloader *Downloader, data string) error
+		wantErr    bool
+		errMsg     string
+	}{
+		{
+			name: "Read_FailedToReadFromDataSource_ReadFailed",
+			downloader: &Downloader{
+				Reader: &mockReader{
+					err: errors.New("unexpected read error"),
+				},
+			},
+			prep: func(*Downloader, string) error {
+				return nil
+			},
+			wantErr: true,
+			errMsg:  "unexpected read error",
+		},
+		{
+			name: "Read_FailedToReadWithEOF_ReadFailed",
+			downloader: &Downloader{
+				Reader: &mockReader{
+					err: io.EOF,
+				},
+			},
+			prep: func(*Downloader, string) error {
+				return nil
+			},
+			wantErr: true,
+			errMsg:  "EOF",
+		},
+		{
+			name: "Read_FromValidDataSource_ReadSucceeded",
+			downloader: &Downloader{
+				Current: 3,
+				Total:   10,
+			},
+			data: "test data",
+			prep: func(downloader *Downloader, data string) error {
+				downloader.Reader = &mockReader{data: []byte(data)}
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.downloader, test.data); err != nil {
+				t.Fatalf("failed to prep before reading the data, got: %v", err)
+			}
+			buffer := test.downloader.Reader.(*mockReader).data
+			_, err := test.downloader.Read(buffer)
+			if err == nil && test.wantErr {
+				t.Fatal("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+			if string(buffer) != test.data {
+				t.Errorf("expected read buffer data to be %s, but got %s", test.data, string(buffer))
+			}
+		})
+	}
+}
+
+func TestDownloadFile(t *testing.T) {
+	tests := []struct {
+		name     string
+		url      string
+		filePath string
+		prep     func(url, filePath string) error
+		verify   func(filePath string) error
+		wantErr  bool
+		errMsg   string
+	}{
+		{
+			name: "DownloadFile_UrlIsNotFound_FailedToGetResponse",
+			url:  "not-found-url",
+			prep: func(url, _ string) error {
+				httpClient = http.Client{
+					Transport: &mockRoundTripper{
+						err: fmt.Errorf("failed to get url %s, url is not found", url),
+					},
+					Timeout: time.Second,
+				}
+				return nil
+			},
+			verify:  func(string) error { return nil },
+			wantErr: true,
+			errMsg:  "failed to get url not-found-url, url is not found",
+		},
+		{
+			name: "DownloadFile_ServiceIsUnavailable_FailedToReachTheService",
+			url:  "https://www.example.com/test-file",
+			prep: func(_, _ string) error {
+				httpClient = http.Client{
+					Transport: &mockRoundTripper{
+						response: &http.Response{
+							StatusCode: http.StatusServiceUnavailable,
+						},
+					},
+					Timeout: time.Second,
+				}
+				return nil
+			},
+			verify:  func(string) error { return nil },
+			wantErr: true,
+			errMsg:  "failed to download file",
+		},
+		{
+			name:     "DownloadFile_ValidResponse_FileDownloadedSuccessfully",
+			url:      "https://www.example.com/test-file",
+			filePath: filepath.Join(os.TempDir(), "temp-download-file.txt"),
+			prep: func(_, filePath string) error {
+				// Create temp download filepath.
+				tempFile, err := os.Create(filePath)
+				if err != nil {
+					return fmt.Errorf("failed to create temp download file: %w", err)
+				}
+				defer tempFile.Close()
+
+				// Create HTTP client.
+				httpClient = http.Client{
+					Transport: &mockRoundTripper{
+						response: &http.Response{
+							StatusCode:    http.StatusOK,
+							Body:          io.NopCloser(bytes.NewReader([]byte("Hello, World!"))),
+							ContentLength: int64(len("Hello, World!")),
+						},
+					},
+					Timeout: time.Second,
+				}
+
+				return nil
+			},
+			verify: func(filePath string) error {
+				// Read the content of the downloaded file.
+				content, err := os.ReadFile(filePath)
+				if err != nil {
+					return fmt.Errorf("failed to read file: %w", err)
+				}
+
+				// Verify the content of the file.
+				expected := "Hello, World!"
+				if string(content) != expected {
+					return fmt.Errorf("unexpected file content: got %q, want %q", string(content), expected)
+				}
+
+				if err := os.Remove(filePath); err != nil {
+					return fmt.Errorf("failed to clean up %s", filePath)
+				}
+
+				return nil
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.url, test.filePath); err != nil {
+				t.Fatalf("failed to prep before downloading the file, got: %v", err)
+			}
+			err := DownloadFile(test.url, test.filePath)
+			if err == nil && test.wantErr {
+				t.Fatal("expected an error, but got none")
+			}
+			if err != nil && !test.wantErr {
+				t.Errorf("unexpected error, got: %v", err)
+			}
+			if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) {
+				t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error())
+			}
+			if err := test.verify(test.filePath); err != nil {
+				t.Errorf("failed to verify the downloaded file: %v", err)
+			}
+		})
+	}
+}
+
+func TestUnpack(t *testing.T) {
+	tests := []struct {
+		name        string
+		tarFile     string
+		regularFile string
+		targetPath  *string
+		prep        func(tarFile, regularFile string, targetPath *string) error
+		verify      func(regularFile string, targetPath string) error
+		wantErr     bool
+		errMsg      string
+	}{
+		{
+			name:       "Unpack_InvalidGzipFileHeader_InvalidHeader",
+			tarFile:    "invalid.tar.gz",
+			targetPath: ptr.To(""),
+			prep: func(tarFile, _ string, targetPath *string) error {
+				var err error
+				*targetPath, err = os.MkdirTemp("", "test-unpack-*")
+				if err != nil {
+					return err
+				}
+				f, err := os.Create(filepath.Join(*targetPath, tarFile))
+				if err != nil {
+					return err
+				}
+				defer f.Close()
+				_, err = f.WriteString("Invalid gzip content")
+				return err
+			},
+			verify: func(_, targetPath string) error {
+				return os.RemoveAll(targetPath)
+			},
+			wantErr: true,
+			errMsg:  gzip.ErrHeader.Error(),
+		},
+		{
+			name:        "Unpack_ValidTarGzipped_UnpackedSuccessfully",
+			tarFile:     "valid.tar.gz",
+			regularFile: "test-file.txt",
+			targetPath:  ptr.To(""),
+			prep:        verifyValidTarGzipped,
+			verify: func(regularFile string, targetPath string) error {
+				fileExpected := filepath.Join(targetPath, "test", regularFile)
+				_, err := os.Stat(fileExpected)
+				if err != nil {
+					return fmt.Errorf("failed to find the file %s, got error: %v", fileExpected, err)
+				}
+				return os.RemoveAll(targetPath)
+			},
+			wantErr: false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(test.tarFile, test.regularFile, test.targetPath); err != nil {
+				t.Fatalf("failed to prep before unpacking the tar file: %v", err)
+			}
+			err := Unpack(filepath.Join(*test.targetPath, test.tarFile), *test.targetPath)
+			if err == nil && test.wantErr {
+				t.Fatal("expected an error, but got none")
+			}
+			if err != nil &&
!test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if err := test.verify(test.regularFile, *test.targetPath); err != nil { + t.Errorf("failed to verify unpacking process, got: %v", err) + } + }) + } +} + func TestListFileWithSuffix(t *testing.T) { suffix := ".yaml" files := ListFileWithSuffix(".", suffix) @@ -35,3 +334,52 @@ func TestListFileWithSuffix(t *testing.T) { } } } + +// verifyValidTarGzipped creates a tar.gz file in a temporary directory. +// The archive contains a "test" directory and a file with the specified name, +// containing the message "Hello, World!". +func verifyValidTarGzipped(tarFile, regularFile string, targetPath *string) error { + var err error + *targetPath, err = os.MkdirTemp("", "test-unpack-*") + if err != nil { + return err + } + f, err := os.Create(filepath.Join(*targetPath, tarFile)) + if err != nil { + return err + } + defer f.Close() + + // Create a gzip writer. + gw := gzip.NewWriter(f) + defer gw.Close() + + // Create a tar writer. + tw := tar.NewWriter(gw) + defer tw.Close() + + // Add a directory to the tar. + if err := tw.WriteHeader(&tar.Header{ + Name: "test" + string(filepath.Separator), + Typeflag: tar.TypeDir, + Mode: 0755, + }); err != nil { + return err + } + + // Add a file to the tar. + message := "Hello, World!" + if err := tw.WriteHeader(&tar.Header{ + Name: filepath.Join("test", regularFile), + Mode: 0644, + Size: int64(len(message)), + Typeflag: tar.TypeReg, + }); err != nil { + return err + } + if _, err := tw.Write([]byte(message)); err != nil { + return err + } + + return nil +} diff --git a/operator/pkg/workflow/job_test.go b/operator/pkg/workflow/job_test.go new file mode 100644 index 000000000000..30f2656b7a5c --- /dev/null +++ b/operator/pkg/workflow/job_test.go @@ -0,0 +1,163 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package workflow + +import ( + "fmt" + "reflect" + "strings" + "testing" +) + +type TestRunData struct { + name string +} + +func TestInitData(t *testing.T) { + tests := []struct { + name string + job *Job + wantRunData RunData + wantErr bool + errMsg string + }{ + { + name: "InitData_NoRunDataIsInitialized_InitializeRunDataWithError", + job: &Job{ + runDataInitializer: func() (RunData, error) { + return nil, fmt.Errorf("failed to initialize run data") + }, + runData: nil, + }, + wantRunData: nil, + wantErr: true, + errMsg: "failed to initialize run data", + }, + { + name: "InitData_NoRunDataIsInitialized_InitializeRunData", + job: &Job{ + runDataInitializer: func() (RunData, error) { + return &TestRunData{name: "test"}, nil + }, + runData: nil, + }, + wantRunData: &TestRunData{name: "test"}, + wantErr: false, + }, + { + name: "InitData_RunDataIsAlreadyInitialized_InitializeRunData", + job: &Job{ + runDataInitializer: func() (RunData, error) { + return &TestRunData{name: "test"}, nil + }, + runData: &TestRunData{name: "already-exist"}, + }, + wantRunData: &TestRunData{name: "already-exist"}, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + runData, err := test.job.initData() + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error msg %s to be part of %s", test.errMsg, err.Error()) + } + if !reflect.DeepEqual(runData, test.wantRunData) { + t.Errorf("unexpected run data: expected %v, got %v", test.wantRunData, runData) + } + }) + } +} + +func TestRun(t *testing.T) { + tests := []struct { + name string + job *Job + wantRunData RunData + wantErr bool + errMsg string + }{ + { + name: "Run_NilRunData_FailedToInitializeRunData", + job: &Job{ + runDataInitializer: func() (RunData, error) { + return nil, fmt.Errorf("failed to initialize run data") + }, + runData: nil, + }, + wantErr: true, + errMsg: "failed to initialize run data", + }, + { + name: "Run_Task_TaskRunSuccessfully", + job: &Job{ + runDataInitializer: func() (RunData, error) { + return &TestRunData{name: "test"}, nil + }, + runData: nil, + Tasks: []Task{ + { + Name: "SkipRunningTask", + Skip: func(RunData) (bool, error) { + return true, nil + }, + }, + { + Name: "RunSubTask", + Run: func(RunData) error { + return nil + }, + Tasks: []Task{ + { + Name: "RunSubTask_2", + Run: func(RunData) error { + return nil + }, + }, + }, + RunSubTasks: true, + }, + }, + }, + wantRunData: &TestRunData{name: "test"}, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := test.job.Run() + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error msg %s to be part of %s", test.errMsg, err.Error()) + } + if !reflect.DeepEqual(test.job.runData, test.wantRunData) { + t.Errorf("unexpected run data: expected %v, got %v", test.wantRunData, test.job.runData) + } + }) + } +} diff --git a/pkg/aggregatedapiserver/apiserver.go b/pkg/aggregatedapiserver/apiserver.go index 5770c01b1985..2e31bb8d2ee8 100644 --- a/pkg/aggregatedapiserver/apiserver.go +++ b/pkg/aggregatedapiserver/apiserver.go @@ -17,7 +17,9 @@ limitations under
the License. package aggregatedapiserver import ( + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/version" + "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" listcorev1 "k8s.io/client-go/listers/core/v1" @@ -70,6 +72,13 @@ func (cfg *Config) Complete() CompletedConfig { return CompletedConfig{&c} } +var newClusterStorageBuilder = func(scheme *runtime.Scheme, restConfig *restclient.Config, secretLister listcorev1.SecretLister, optsGetter generic.RESTOptionsGetter) (*clusterstorage.ClusterStorage, error) { + return clusterstorage.NewStorage(scheme, restConfig, secretLister, optsGetter) +} +var apiGroupInstaller = func(server *APIServer, apiGroupInfo *genericapiserver.APIGroupInfo) error { + return server.GenericAPIServer.InstallAPIGroup(apiGroupInfo) +} + func (c completedConfig) New(restConfig *restclient.Config, secretLister listcorev1.SecretLister) (*APIServer, error) { genericServer, err := c.GenericConfig.New("aggregated-apiserver", genericapiserver.NewEmptyDelegate()) if err != nil { @@ -82,7 +91,7 @@ func (c completedConfig) New(restConfig *restclient.Config, secretLister listcor apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(clusterapis.GroupName, clusterscheme.Scheme, clusterscheme.ParameterCodec, clusterscheme.Codecs) - clusterStorage, err := clusterstorage.NewStorage(clusterscheme.Scheme, restConfig, secretLister, c.GenericConfig.RESTOptionsGetter) + clusterStorage, err := newClusterStorageBuilder(clusterscheme.Scheme, restConfig, secretLister, c.GenericConfig.RESTOptionsGetter) if err != nil { klog.Errorf("Unable to create REST storage for a resource due to %v, will die", err) return nil, err @@ -93,7 +102,7 @@ func (c completedConfig) New(restConfig *restclient.Config, secretLister listcor v1alpha1cluster["clusters/proxy"] = clusterStorage.Proxy apiGroupInfo.VersionedResourcesStorageMap["v1alpha1"] = v1alpha1cluster - if err = server.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil { + if err = apiGroupInstaller(server, &apiGroupInfo); err != nil { return nil, err } diff --git a/pkg/aggregatedapiserver/apiserver_test.go b/pkg/aggregatedapiserver/apiserver_test.go new file mode 100644 index 000000000000..3fd292ca159e --- /dev/null +++ b/pkg/aggregatedapiserver/apiserver_test.go @@ -0,0 +1,166 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aggregatedapiserver + +import ( + "errors" + "net" + "net/http" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/endpoints/openapi" + "k8s.io/apiserver/pkg/registry/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + listcorev1 "k8s.io/client-go/listers/core/v1" + restclient "k8s.io/client-go/rest" + + clusterscheme "github.com/karmada-io/karmada/pkg/apis/cluster/scheme" + generatedopenapi "github.com/karmada-io/karmada/pkg/generated/openapi" + clusterstorage "github.com/karmada-io/karmada/pkg/registry/cluster/storage" +) + +func TestNewAggregatedAPIServer(t *testing.T) { + tests := []struct { + name string + cfg *completedConfig + genericAPIServerConfig *genericapiserver.Config + restConfig *restclient.Config + secretLister listcorev1.SecretLister + client clientset.Interface + prep func(*completedConfig, *genericapiserver.Config, clientset.Interface) error + wantErr bool + errMsg string + }{ + { + name: "NewAggregatedAPIServer_NetworkIssue_FailedToCreateRESTStorage", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + ExternalAddress: "10.0.0.0:10000", + }, + client: fakeclientset.NewSimpleClientset(), + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + newClusterStorageBuilder = func(*runtime.Scheme, *restclient.Config, listcorev1.SecretLister, generic.RESTOptionsGetter) (*clusterstorage.ClusterStorage, error) { + return nil, errors.New("unexpected network issue while creating the cluster storage") + } + return nil + }, + wantErr: true, + errMsg: "unexpected network issue while creating the cluster storage", + }, + { + name: "NewAggregatedAPIServer_InstalledAPIGroup_FailedToInstallAPIGroup", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + OpenAPIV3Config: genericapiserver.DefaultOpenAPIV3Config(generatedopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(clusterscheme.Scheme)), + ExternalAddress: "10.0.0.0:10000", + }, + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + newClusterStorageBuilder = func(*runtime.Scheme, *restclient.Config, listcorev1.SecretLister, generic.RESTOptionsGetter) 
(*clusterstorage.ClusterStorage, error) { + return &clusterstorage.ClusterStorage{}, nil + } + apiGroupInstaller = func(*APIServer, *genericapiserver.APIGroupInfo) error { + return errors.New("failed to install api group") + } + return nil + }, + wantErr: true, + errMsg: "failed to install api group", + }, + { + name: "NewAggregatedAPIServer_InstalledAPIGroup_APIGroupInstalled", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + SecureServing: &genericapiserver.SecureServingInfo{ + Listener: &net.TCPListener{}, + }, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + OpenAPIV3Config: genericapiserver.DefaultOpenAPIV3Config(generatedopenapi.GetOpenAPIDefinitions, openapi.NewDefinitionNamer(clusterscheme.Scheme)), + ExternalAddress: "10.0.0.0:10000", + }, + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + newClusterStorageBuilder = func(*runtime.Scheme, *restclient.Config, listcorev1.SecretLister, generic.RESTOptionsGetter) (*clusterstorage.ClusterStorage, error) { + return &clusterstorage.ClusterStorage{}, nil + } + apiGroupInstaller = func(*APIServer, *genericapiserver.APIGroupInfo) error { + return nil + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.cfg, test.genericAPIServerConfig, test.client); err != nil { + t.Fatalf("failed to prep test environment before creating new aggregated apiserver, got: %v", err) + } + _, err := test.cfg.New(test.restConfig, test.secretLister) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + }) + } +} diff --git a/pkg/apis/cluster/types.go b/pkg/apis/cluster/types.go index 05d783cb5283..f7f8aea3df97 100644 --- a/pkg/apis/cluster/types.go +++ b/pkg/apis/cluster/types.go @@ -266,6 +266,9 @@ type LocalSecretReference struct { const ( // ClusterConditionReady means the cluster is healthy and ready to accept workloads. ClusterConditionReady = "Ready" + + // ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) is complete. + ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements" ) // ClusterStatus contains information about the current status of a diff --git a/pkg/apis/cluster/v1alpha1/types.go b/pkg/apis/cluster/v1alpha1/types.go index 29cfc61da0fc..fe56dca7da94 100644 --- a/pkg/apis/cluster/v1alpha1/types.go +++ b/pkg/apis/cluster/v1alpha1/types.go @@ -278,6 +278,9 @@ type LocalSecretReference struct { const ( // ClusterConditionReady means the cluster is healthy and ready to accept workloads. 
ClusterConditionReady = "Ready" + + // ClusterConditionCompleteAPIEnablements indicates whether the cluster's API enablements(.status.apiEnablements) is complete. + ClusterConditionCompleteAPIEnablements = "CompleteAPIEnablements" ) // ClusterStatus contains information about the current status of a diff --git a/pkg/apis/config/v1alpha1/resourceinterpreterwebhook_types.go b/pkg/apis/config/v1alpha1/resourceinterpreterwebhook_types.go index eb3e067d2ca9..d1edef1262cf 100644 --- a/pkg/apis/config/v1alpha1/resourceinterpreterwebhook_types.go +++ b/pkg/apis/config/v1alpha1/resourceinterpreterwebhook_types.go @@ -99,7 +99,7 @@ type RuleWithOperations struct { type InterpreterOperation string const ( - // InterpreterOperationAll indicates math all InterpreterOperation. + // InterpreterOperationAll indicates matching all InterpreterOperation. InterpreterOperationAll InterpreterOperation = "*" // InterpreterOperationInterpretReplica indicates that karmada want to figure out the replica declaration of a specific object. diff --git a/pkg/apis/policy/v1alpha1/override_types.go b/pkg/apis/policy/v1alpha1/override_types.go index c57e20381662..6c8b3d47bd86 100644 --- a/pkg/apis/policy/v1alpha1/override_types.go +++ b/pkg/apis/policy/v1alpha1/override_types.go @@ -101,6 +101,7 @@ type RuleWithCluster struct { // - ArgsOverrider // - LabelsOverrider // - AnnotationsOverrider +// - FieldOverrider // - Plaintext type Overriders struct { // Plaintext represents override rules defined with plaintext overriders. @@ -126,6 +127,13 @@ type Overriders struct { // AnnotationsOverrider represents the rules dedicated to handling workload annotations // +optional AnnotationsOverrider []LabelAnnotationOverrider `json:"annotationsOverrider,omitempty"` + + // FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. + // This allows changing a single field within the resource with multiple operations. + // It is designed to handle structured field values such as those found in ConfigMaps or Secrets. + // The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. + // +optional + FieldOverrider []FieldOverrider `json:"fieldOverrider,omitempty"` } // LabelAnnotationOverrider represents the rules dedicated to handling workload labels/annotations @@ -255,6 +263,65 @@ const ( OverriderOpReplace OverriderOperator = "replace" ) +// FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. +// This allows changing a single field within the resource with multiple operations. +// It is designed to handle structured field values such as those found in ConfigMaps or Secrets. +// The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. +// Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously. +type FieldOverrider struct { + // FieldPath specifies the initial location in the instance document where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. For example, the path "/data/db-config.yaml" + // specifies the configuration data key named "db-config.yaml" in a ConfigMap: "/data/db-config.yaml". + // +required + FieldPath string `json:"fieldPath"` + + // JSON represents the operations performed on the JSON document specified by the FieldPath. 
+ // +optional + JSON []JSONPatchOperation `json:"json,omitempty"` + + // YAML represents the operations performed on the YAML document specified by the FieldPath. + // +optional + YAML []YAMLPatchOperation `json:"yaml,omitempty"` +} + +// JSONPatchOperation represents a single field modification operation for JSON format. +type JSONPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + +// YAMLPatchOperation represents a single field modification operation for YAML format. +type YAMLPatchOperation struct { + // SubPath specifies the relative location within the initial FieldPath where the operation should take place. + // The path uses RFC 6901 for navigating into nested structures. + // +required + SubPath string `json:"subPath"` + + // Operator indicates the operation on target field. + // Available operators are: "add", "remove", and "replace". + // +kubebuilder:validation:Enum=add;remove;replace + // +required + Operator OverriderOperator `json:"operator"` + + // Value is the new value to set for the specified field if the operation is "add" or "replace". + // For "remove" operation, this field is ignored. + // +optional + Value apiextensionsv1.JSON `json:"value,omitempty"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // OverridePolicyList is a collection of OverridePolicy. diff --git a/pkg/apis/policy/v1alpha1/propagation_types.go b/pkg/apis/policy/v1alpha1/propagation_types.go index ee6a38a85867..60be208a4ffc 100644 --- a/pkg/apis/policy/v1alpha1/propagation_types.go +++ b/pkg/apis/policy/v1alpha1/propagation_types.go @@ -181,6 +181,25 @@ type PropagationSpec struct { // nil means no suspension. no default values. // +optional Suspension *Suspension `json:"suspension,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the resource template is deleted. + // If set to true, resources will be preserved on the member clusters. + // Default is false, which means resources will be deleted along with the resource template. + // + // This setting is particularly useful during workload migration scenarios to ensure + // that rollback can occur quickly without affecting the workloads running on the + // member clusters. + // + // Additionally, this setting applies uniformly across all member clusters and will not + // selectively control preservation on only some clusters. + // + // Note: This setting does not apply to the deletion of the policy itself. + // When the policy is deleted, the resource templates and their corresponding + // propagated resources in member clusters will remain unchanged unless explicitly deleted. + // + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // ResourceSelector the resources will be selected. 
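For a concrete sense of how the new overrider composes, the sketch below builds an Overriders value that rewrites one key inside the YAML document carried in a ConfigMap's data field. The "/data/db-config.yaml" field path is the example used in the API comments above; the "/pool/maxIdleConnections" sub-path is hypothetical, and both follow RFC 6901. This is an illustration against the types added in this patch, not code from the patch itself.

package main

import (
	"fmt"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

func main() {
	// Replace one value inside the YAML document stored under the (hypothetical)
	// ConfigMap key "db-config.yaml"; the rest of the document is left untouched.
	overriders := policyv1alpha1.Overriders{
		FieldOverrider: []policyv1alpha1.FieldOverrider{{
			FieldPath: "/data/db-config.yaml", // RFC 6901 pointer to the ConfigMap data key
			YAML: []policyv1alpha1.YAMLPatchOperation{{
				SubPath:  "/pool/maxIdleConnections", // RFC 6901 pointer inside the parsed YAML
				Operator: policyv1alpha1.OverriderOpReplace,
				Value:    apiextensionsv1.JSON{Raw: []byte(`10`)},
			}},
		}},
	}
	fmt.Printf("%+v\n", overriders)
}

Per the API comments, a single FieldOverrider instance carries either JSON or YAML operations, not both at once.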
diff --git a/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go index 764fa323ef39..c1683072ed54 100644 --- a/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go @@ -453,6 +453,36 @@ func (in *FederatedResourceQuotaStatus) DeepCopy() *FederatedResourceQuotaStatus return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldOverrider) DeepCopyInto(out *FieldOverrider) { + *out = *in + if in.JSON != nil { + in, out := &in.JSON, &out.JSON + *out = make([]JSONPatchOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.YAML != nil { + in, out := &in.YAML, &out.YAML + *out = make([]YAMLPatchOperation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldOverrider. +func (in *FieldOverrider) DeepCopy() *FieldOverrider { + if in == nil { + return nil + } + out := new(FieldOverrider) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FieldSelector) DeepCopyInto(out *FieldSelector) { *out = *in @@ -513,6 +543,23 @@ func (in *ImagePredicate) DeepCopy() *ImagePredicate { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JSONPatchOperation) DeepCopyInto(out *JSONPatchOperation) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JSONPatchOperation. +func (in *JSONPatchOperation) DeepCopy() *JSONPatchOperation { + if in == nil { + return nil + } + out := new(JSONPatchOperation) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LabelAnnotationOverrider) DeepCopyInto(out *LabelAnnotationOverrider) { *out = *in @@ -677,6 +724,13 @@ func (in *Overriders) DeepCopyInto(out *Overriders) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.FieldOverrider != nil { + in, out := &in.FieldOverrider, &out.FieldOverrider + *out = make([]FieldOverrider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -843,6 +897,11 @@ func (in *PropagationSpec) DeepCopyInto(out *PropagationSpec) { *out = new(Suspension) (*in).DeepCopyInto(*out) } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } @@ -1022,3 +1081,20 @@ func (in *Suspension) DeepCopy() *Suspension { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *YAMLPatchOperation) DeepCopyInto(out *YAMLPatchOperation) { + *out = *in + in.Value.DeepCopyInto(&out.Value) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new YAMLPatchOperation. 
+func (in *YAMLPatchOperation) DeepCopy() *YAMLPatchOperation { + if in == nil { + return nil + } + out := new(YAMLPatchOperation) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/apis/work/v1alpha1/work_types.go b/pkg/apis/work/v1alpha1/work_types.go index f1e03ee3e79c..4f136df06648 100644 --- a/pkg/apis/work/v1alpha1/work_types.go +++ b/pkg/apis/work/v1alpha1/work_types.go @@ -60,9 +60,17 @@ type WorkSpec struct { // SuspendDispatching controls whether dispatching should // be suspended, nil means not suspend. - // Note: true means stop propagating to all clusters. + // Note: true means stop propagating to the corresponding member cluster, and + // does not prevent status collection. // +optional SuspendDispatching *bool `json:"suspendDispatching,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member cluster when the Work object is deleted. + // If set to true, resources will be preserved on the member cluster. + // Default is false, which means resources will be deleted along with the Work object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // WorkloadTemplate represents the manifest workload to be deployed on managed cluster. diff --git a/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go index 824f379202d6..1adbecfdf79d 100644 --- a/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/work/v1alpha1/zz_generated.deepcopy.go @@ -386,6 +386,11 @@ func (in *WorkSpec) DeepCopyInto(out *WorkSpec) { *out = new(bool) **out = **in } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } diff --git a/pkg/apis/work/v1alpha2/binding_types.go b/pkg/apis/work/v1alpha2/binding_types.go index 25952770af22..7797fb3c60fb 100644 --- a/pkg/apis/work/v1alpha2/binding_types.go +++ b/pkg/apis/work/v1alpha2/binding_types.go @@ -151,6 +151,14 @@ type ResourceBindingSpec struct { // nil means no suspension. no default values. // +optional Suspension *policyv1alpha1.Suspension `json:"suspension,omitempty"` + + // PreserveResourcesOnDeletion controls whether resources should be preserved on the + // member clusters when the binding object is deleted. + // If set to true, resources will be preserved on the member clusters. + // Default is false, which means resources will be deleted along with the binding object. + // This setting applies to all Work objects created under this binding object. + // +optional + PreserveResourcesOnDeletion *bool `json:"preserveResourcesOnDeletion,omitempty"` } // ObjectReference contains enough information to locate the referenced object inside current cluster. 
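Tracing the new field end to end: the same PreserveResourcesOnDeletion switch now appears on PropagationSpec, ResourceBindingSpec, and WorkSpec, with the intent that a value set on the policy flows down to every Work. The sketch below only illustrates that hand-off under that stated intent; the actual controller wiring lands in the binding controller changes later in this patch.

package main

import (
	"fmt"

	"k8s.io/utils/ptr"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
	workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1"
)

func main() {
	// Declared once at the policy level...
	policySpec := policyv1alpha1.PropagationSpec{
		PreserveResourcesOnDeletion: ptr.To(true),
	}

	// ...and carried, via the binding, onto each Work object, where the
	// execution side consults it when the Work is deleted.
	workSpec := workv1alpha1.WorkSpec{
		PreserveResourcesOnDeletion: policySpec.PreserveResourcesOnDeletion,
	}

	fmt.Println(ptr.Deref(workSpec.PreserveResourcesOnDeletion, false)) // true
}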
diff --git a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go index 826fea036ee4..824a7d4f5a08 100644 --- a/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/apis/work/v1alpha2/zz_generated.deepcopy.go @@ -353,6 +353,11 @@ func (in *ResourceBindingSpec) DeepCopyInto(out *ResourceBindingSpec) { *out = new(v1alpha1.Suspension) (*in).DeepCopyInto(*out) } + if in.PreserveResourcesOnDeletion != nil { + in, out := &in.PreserveResourcesOnDeletion, &out.PreserveResourcesOnDeletion + *out = new(bool) + **out = **in + } return } diff --git a/pkg/controllers/applicationfailover/crb_application_failover_controller.go b/pkg/controllers/applicationfailover/crb_application_failover_controller.go index 257772902289..1a806a1fd1d0 100644 --- a/pkg/controllers/applicationfailover/crb_application_failover_controller.go +++ b/pkg/controllers/applicationfailover/crb_application_failover_controller.go @@ -45,7 +45,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// CRBApplicationFailoverControllerName is the controller name that will be used when reporting events. +// CRBApplicationFailoverControllerName is the controller name that will be used when reporting events and metrics. const CRBApplicationFailoverControllerName = "cluster-resource-binding-application-failover-controller" // CRBApplicationFailoverController is to sync ClusterResourceBinding's application failover behavior. @@ -230,6 +230,7 @@ func (c *CRBApplicationFailoverController) SetupWithManager(mgr controllerruntim } return controllerruntime.NewControllerManagedBy(mgr). + Named(CRBApplicationFailoverControllerName). For(&workv1alpha2.ClusterResourceBinding{}, builder.WithPredicates(clusterResourceBindingPredicateFn)). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). Complete(c) diff --git a/pkg/controllers/applicationfailover/rb_application_failover_controller.go b/pkg/controllers/applicationfailover/rb_application_failover_controller.go index bbb3ccf9e419..63e1cc2f379d 100644 --- a/pkg/controllers/applicationfailover/rb_application_failover_controller.go +++ b/pkg/controllers/applicationfailover/rb_application_failover_controller.go @@ -45,7 +45,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// RBApplicationFailoverControllerName is the controller name that will be used when reporting events. +// RBApplicationFailoverControllerName is the controller name that will be used when reporting events and metrics. const RBApplicationFailoverControllerName = "resource-binding-application-failover-controller" // RBApplicationFailoverController is to sync ResourceBinding's application failover behavior. @@ -232,6 +232,7 @@ func (c *RBApplicationFailoverController) SetupWithManager(mgr controllerruntime } return controllerruntime.NewControllerManagedBy(mgr). + Named(RBApplicationFailoverControllerName). For(&workv1alpha2.ResourceBinding{}, builder.WithPredicates(resourceBindingPredicateFn)). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). 
Complete(c) diff --git a/pkg/controllers/binding/binding_controller.go b/pkg/controllers/binding/binding_controller.go index 3b798ecf78d8..ae6359a5693c 100644 --- a/pkg/controllers/binding/binding_controller.go +++ b/pkg/controllers/binding/binding_controller.go @@ -49,7 +49,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/overridemanager" ) -// ControllerName is the controller name that will be used when reporting events. +// ControllerName is the controller name that will be used when reporting events and metrics. const ControllerName = "binding-controller" // ResourceBindingController is to sync ResourceBinding. @@ -165,7 +165,9 @@ func (c *ResourceBindingController) removeOrphanWorks(ctx context.Context, bindi // SetupWithManager creates a controller and register to controller manager. func (c *ResourceBindingController) SetupWithManager(mgr controllerruntime.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha2.ResourceBinding{}). + return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). + For(&workv1alpha2.ResourceBinding{}). WithEventFilter(predicate.GenerationChangedPredicate{}). Watches(&policyv1alpha1.OverridePolicy{}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())). Watches(&policyv1alpha1.ClusterOverridePolicy{}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())). diff --git a/pkg/controllers/binding/cluster_resource_binding_controller.go b/pkg/controllers/binding/cluster_resource_binding_controller.go index a897224ca174..e11fe868a498 100644 --- a/pkg/controllers/binding/cluster_resource_binding_controller.go +++ b/pkg/controllers/binding/cluster_resource_binding_controller.go @@ -49,7 +49,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/overridemanager" ) -// ClusterResourceBindingControllerName is the controller name that will be used when reporting events. +// ClusterResourceBindingControllerName is the controller name that will be used when reporting events and metrics. const ClusterResourceBindingControllerName = "cluster-resource-binding-controller" // ClusterResourceBindingController is to sync ClusterResourceBinding. @@ -161,7 +161,9 @@ func (c *ClusterResourceBindingController) removeOrphanWorks(ctx context.Context // SetupWithManager creates a controller and register to controller manager. func (c *ClusterResourceBindingController) SetupWithManager(mgr controllerruntime.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha2.ClusterResourceBinding{}). + return controllerruntime.NewControllerManagedBy(mgr). + Named(ClusterResourceBindingControllerName). + For(&workv1alpha2.ClusterResourceBinding{}). Watches(&policyv1alpha1.ClusterOverridePolicy{}, handler.EnqueueRequestsFromMapFunc(c.newOverridePolicyFunc())). WithEventFilter(predicate.GenerationChangedPredicate{}). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). 
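The repeated .Named(...) additions above are what make the updated "events and metrics" comments accurate: controller-runtime labels its built-in metrics (for example controller_runtime_reconcile_total) and logs with the controller's name, and without an explicit name the builder derives one from the reconciled type (typically the lowercased kind). A minimal sketch of the pattern, using a stand-in no-op reconciler rather than the real ResourceBindingController:

package example

import (
	"context"

	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

func setupBindingController(mgr controllerruntime.Manager) error {
	// Stand-in reconciler; in Karmada the real one is ResourceBindingController.
	noop := reconcile.Func(func(context.Context, reconcile.Request) (reconcile.Result, error) {
		return reconcile.Result{}, nil
	})

	return controllerruntime.NewControllerManagedBy(mgr).
		Named("binding-controller"). // becomes the "controller" label on metrics and the logger name
		For(&workv1alpha2.ResourceBinding{}).
		Complete(noop)
}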
diff --git a/pkg/controllers/binding/common.go b/pkg/controllers/binding/common.go index 52593cc7e0fa..3556989ed75e 100644 --- a/pkg/controllers/binding/common.go +++ b/pkg/controllers/binding/common.go @@ -48,6 +48,7 @@ func ensureWork( var replicas int32 var conflictResolutionInBinding policyv1alpha1.ConflictResolution var suspension *policyv1alpha1.Suspension + var preserveResourcesOnDeletion *bool switch scope { case apiextensionsv1.NamespaceScoped: bindingObj := binding.(*workv1alpha2.ResourceBinding) @@ -57,6 +58,7 @@ func ensureWork( replicas = bindingObj.Spec.Replicas conflictResolutionInBinding = bindingObj.Spec.ConflictResolution suspension = bindingObj.Spec.Suspension + preserveResourcesOnDeletion = bindingObj.Spec.PreserveResourcesOnDeletion case apiextensionsv1.ClusterScoped: bindingObj := binding.(*workv1alpha2.ClusterResourceBinding) targetClusters = bindingObj.Spec.Clusters @@ -65,6 +67,7 @@ func ensureWork( replicas = bindingObj.Spec.Replicas conflictResolutionInBinding = bindingObj.Spec.ConflictResolution suspension = bindingObj.Spec.Suspension + preserveResourcesOnDeletion = bindingObj.Spec.PreserveResourcesOnDeletion } targetClusters = mergeTargetClusters(targetClusters, requiredByBindingSnapshot) @@ -133,9 +136,14 @@ func ensureWork( Annotations: annotations, } - suspendDispatching := shouldSuspendDispatching(suspension, targetCluster) - - if err = helper.CreateOrUpdateWork(ctx, c, workMeta, clonedWorkload, &suspendDispatching); err != nil { + if err = helper.CreateOrUpdateWork( + ctx, + c, + workMeta, + clonedWorkload, + helper.WithSuspendDispatching(shouldSuspendDispatching(suspension, targetCluster)), + helper.WithPreserveResourcesOnDeletion(ptr.Deref(preserveResourcesOnDeletion, false)), + ); err != nil { return err } } diff --git a/pkg/controllers/certificate/cert_rotation_controller.go b/pkg/controllers/certificate/cert_rotation_controller.go index 9fe53a085622..d5c058c4d14c 100644 --- a/pkg/controllers/certificate/cert_rotation_controller.go +++ b/pkg/controllers/certificate/cert_rotation_controller.go @@ -50,7 +50,7 @@ import ( ) const ( - // CertRotationControllerName is the controller name that will be used when reporting events. + // CertRotationControllerName is the controller name that will be used when reporting events and metrics. CertRotationControllerName = "cert-rotation-controller" // SignerName defines the signer name for csr, 'kubernetes.io/kube-apiserver-client-kubelet' can sign the csr automatically @@ -118,7 +118,7 @@ func (c *CertRotationController) Reconcile(ctx context.Context, req controllerru return controllerruntime.Result{}, err } - if err = c.syncCertRotation(secret); err != nil { + if err = c.syncCertRotation(ctx, secret); err != nil { klog.Errorf("Failed to rotate the certificate of karmada-agent for the given member cluster: %s, err is: %v", cluster.Name, err) return controllerruntime.Result{}, err } @@ -129,6 +129,7 @@ func (c *CertRotationController) Reconcile(ctx context.Context, req controllerru // SetupWithManager creates a controller and register to controller manager. func (c *CertRotationController) SetupWithManager(mgr controllerruntime.Manager) error { return controllerruntime.NewControllerManagedBy(mgr). + Named(CertRotationControllerName). For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(c.PredicateFunc)). WithEventFilter(predicate.GenerationChangedPredicate{}). 
WithOptions(controller.Options{ @@ -137,7 +138,7 @@ func (c *CertRotationController) SetupWithManager(mgr controllerruntime.Manager) Complete(c) } -func (c *CertRotationController) syncCertRotation(secret *corev1.Secret) error { +func (c *CertRotationController) syncCertRotation(ctx context.Context, secret *corev1.Secret) error { karmadaKubeconfig, err := getKubeconfigFromSecret(secret) if err != nil { return err @@ -174,15 +175,15 @@ func (c *CertRotationController) syncCertRotation(secret *corev1.Secret) error { return fmt.Errorf("invalid private key for certificate request: %v", err) } - csr, err := c.createCSRInControlPlane(clusterName, privateKey, oldCert) + csr, err := c.createCSRInControlPlane(ctx, clusterName, privateKey, oldCert) if err != nil { return fmt.Errorf("failed to create csr in control plane, err is: %v", err) } var newCertData []byte klog.V(1).Infof("Waiting for the client certificate to be issued") - err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 5*time.Minute, false, func(context.Context) (done bool, err error) { - csr, err := c.KubeClient.CertificatesV1().CertificateSigningRequests().Get(context.TODO(), csr, metav1.GetOptions{}) + err = wait.PollUntilContextTimeout(ctx, 1*time.Second, 5*time.Minute, false, func(context.Context) (done bool, err error) { + csr, err := c.KubeClient.CertificatesV1().CertificateSigningRequests().Get(ctx, csr, metav1.GetOptions{}) if err != nil { return false, fmt.Errorf("failed to get the cluster csr %s. err: %v", clusterName, err) } @@ -210,7 +211,7 @@ func (c *CertRotationController) syncCertRotation(secret *corev1.Secret) error { secret.Data["karmada-kubeconfig"] = karmadaKubeconfigBytes // Update the karmada-kubeconfig secret in the member cluster. - if _, err := c.ClusterClient.KubeClient.CoreV1().Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { + if _, err := c.ClusterClient.KubeClient.CoreV1().Secrets(secret.Namespace).Update(ctx, secret, metav1.UpdateOptions{}); err != nil { return fmt.Errorf("unable to update secret, err: %w", err) } @@ -225,7 +226,7 @@ func (c *CertRotationController) syncCertRotation(secret *corev1.Secret) error { return nil } -func (c *CertRotationController) createCSRInControlPlane(clusterName string, privateKey interface{}, oldCert []*x509.Certificate) (string, error) { +func (c *CertRotationController) createCSRInControlPlane(ctx context.Context, clusterName string, privateKey interface{}, oldCert []*x509.Certificate) (string, error) { csrData, err := certutil.MakeCSR(privateKey, &oldCert[0].Subject, nil, nil) if err != nil { return "", fmt.Errorf("unable to generate certificate request: %v", err) @@ -252,7 +253,7 @@ func (c *CertRotationController) createCSRInControlPlane(clusterName string, pri }, } - _, err = c.KubeClient.CertificatesV1().CertificateSigningRequests().Create(context.TODO(), certificateSigningRequest, metav1.CreateOptions{}) + _, err = c.KubeClient.CertificatesV1().CertificateSigningRequests().Create(ctx, certificateSigningRequest, metav1.CreateOptions{}) if err != nil { return "", fmt.Errorf("unable to create certificate request in control plane: %v", err) } diff --git a/pkg/controllers/certificate/cert_rotation_controller_test.go b/pkg/controllers/certificate/cert_rotation_controller_test.go new file mode 100644 index 000000000000..9033eb541595 --- /dev/null +++ b/pkg/controllers/certificate/cert_rotation_controller_test.go @@ -0,0 +1,358 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package certificate + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "reflect" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/fake" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + clientfake "sigs.k8s.io/controller-runtime/pkg/client/fake" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/gclient" +) + +// cert will expire on 2124-07-28T02:18:16Z +var testCA = `-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIUQkIQIcbPnAqI8ucdX54k1QhlzQ4wDQYJKoZIhvcNAQEL +BQAwNDEZMBcGA1UEAwwQa3ViZXJuZXRlcy1hZG1pbjEXMBUGA1UECgwOc3lzdGVt +Om1hc3RlcnMwIBcNMjQwODIxMDIxODE2WhgPMjEyNDA3MjgwMjE4MTZaMDQxGTAX +BgNVBAMMEGt1YmVybmV0ZXMtYWRtaW4xFzAVBgNVBAoMDnN5c3RlbTptYXN0ZXJz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqVWggRDZWNvCgNEMoncb +ASN3RNpZfA+cK4vNC/D2R9m5ATR/ZGt06aP3zWLkqc43MmwsLcFDy/wY+hB50/It +Zqjt5EaIl1jqZnRXuEXe5phq/fZICSM2vL9tt0JX9L9c5LedSWJwSZ8gvjpwQacK +SGMy+HM5lC5Ta3bOR98sTEyFG6Z8kX9KT2HgYsveShO242TRUSJPKW+xocJjxqL+ +GFHKoZp4D+yYkZ2dahHvPiSCxe9WDXKbpZRPTxNb/EMkJ6YOuU8N2QW44u9Lx1y7 +jIAPL6vGcUJeo2UhvyShGKzPrI1tGSWnjKxKCOv8rK5NPuhIXTXDHhTCDB4/r6xt +xQIDAQABo3UwczAdBgNVHQ4EFgQU3vrp4HTLqAPRBqVbo9CV53KjlQkwHwYDVR0j +BBgwFoAU3vrp4HTLqAPRBqVbo9CV53KjlQkwDgYDVR0PAQH/BAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwDQYJKoZIhvcNAQELBQADggEB +ADy6VgqstiGXTxV0VzyXtHqVkHX2GNl58HYwC8ti5uy/T7U/WhzOmjDMxoZonA+m +wzE25Dp1J7DN2y02skHoYMP8u0fsDBACWtXxFhwUja+De1CiEZCGhDUeMNS3ka2j +4z9Ow3yChanJXmR6n91hA5TGJ4uk9eFrQgKoLqZ/poRaoxj6861XKWiJS1Wvrz1g +fmbSjVIn4QFA9f611iwS/wGNHJ1dLUza9WuiQeOjculCqxqBl4+kQWmRBcmOkse2 ++KuZJMIMJfS2521AZO35EgXblA2BG1TgZZz6i3E0NMjzi+T1NIMwXawiWXDt4W/l +umubw9aqN/m5NUa3hZ6XmXQ= +-----END CERTIFICATE-----` + +func makeFakeCertRotationController(threshold float64) *CertRotationController { + client := clientfake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects( + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test", Finalizers: []string{util.ClusterControllerFinalizer}}, + Spec: clusterv1alpha1.ClusterSpec{ + APIEndpoint: "https://127.0.0.1", + SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"}, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "secret1"}, + Data: map[string][]byte{clusterv1alpha1.SecretTokenKey: []byte("token"), clusterv1alpha1.SecretCADataKey: []byte(testCA)}, + }).Build() + return &CertRotationController{ + Client: client, + KubeClient: fake.NewSimpleClientset(), + CertRotationRemainingTimeThreshold: threshold, + 
ClusterClientSetFunc: util.NewClusterClientSet, + KarmadaKubeconfigNamespace: "karmada-system", + } +} + +func TestCertRotationController_Reconcile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + del bool + want controllerruntime.Result + wantErr bool + }{ + { + name: "cluster not found", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test1", + }, + }, + want: controllerruntime.Result{}, + wantErr: false, + }, + { + name: "cluster is deleted", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + }, + del: true, + want: controllerruntime.Result{}, + wantErr: false, + }, + { + name: "get secret failed", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := makeFakeCertRotationController(0) + if tt.del { + if err := c.Client.Delete(context.Background(), tt.cluster); err != nil { + t.Fatalf("delete cluster failed, error %v", err) + } + } + + req := controllerruntime.Request{NamespacedName: client.ObjectKey{Namespace: "", Name: tt.cluster.Name}} + got, err := c.Reconcile(context.Background(), req) + if (err != nil) != tt.wantErr { + t.Errorf("CertRotationController.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("CertRotationController.Reconcile() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestCertRotationController_syncCertRotation(t *testing.T) { + tests := []struct { + name string + secret *corev1.Secret + signer bool + threshold float64 + wantErr bool + }{ + { + name: "shoud not rotate cert", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "secret"}, + Data: map[string][]byte{"karmada-kubeconfig": []byte(`apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJME1EZ3hOVEEyTVRZd01Gb1hEVE0wTURneE16QTJNVFl3TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTndZCjU3blNJNDgwMUZIYmtYVUhpUldTTmV3UUxRTTZQbTB5YXArd1JXR2J3emU3US9rbjl0L2xBUWwxdW1aa2ZRalUKVHgyZHV6cFpXQkRndnAreTVBNndaUyt2VTVhSFY4dE1QRi9ocHRVczB1VW11YmQ2OEs4ZnNuREd6bnJwKzdpQwo3R2VyVzB2NDNTdnpqT0dibDQ2Nlp5cXFPRmt5VVhPQ1pVWFJMbWkyMVNrbS9iU2RFS3FDZXBtRDFNSEUwVyttCkJOOXBQeFJOU1dCZGNkSFVqR29odUUrUVBJQXlDWEtBdlNlWDBOZDd6Q1Ayd1dFRE5aSmxRS0REUnFUUHdDS3QKMW9TaDdEeWhvQ0l6clBtNENIcVNHSEJCNnVORmNEZjdpNGhVY09SdW5JMHlVUEsya2FDUmdqTkZKYkJLL29SNApoSFl0SFJwUkN3b244Q3A4dWRFQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZESUZTYXhZNDc1WlZaTlp3dGdwOU1yeFBrU2ZNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSnY3Ymw1L2plVlFZZkxWblByKwpBelVYNmxXdXNHajQ0b09ma2xtZGZTTmxLU3ZGUjVRMi9rWkVQZXZnU3NzZVdIWnVucmZTYkIwVDdWYjdkUkNQCjVRMTk4aUNnZDFwNm0wdXdOVGpSRi85MHhzYUdVOHBQTFQxeHlrMDlCVDc0WVVENnNtOHFVSWVrWFU0U3hlU2oKWjk3VU13azVoZndXUWpqTFc1UklwNW1qZjR2aU1uWXB6SDB4bDREV3Jka1AxbTFCdkZvWmhFMEVaKzlWcGNPYwprNTN4ZkxUR3A2S1UrQ0w4RU5teXFCeTJNcVBXdjRQKzVTZ0hldlY3Ym1WdktuMkx0blExTHdCcDdsdldYb1JRCmUzQm83d3hnSUU0Rnl0VUU4enRaS2ZJSDZPY3VzNWJGY283cGw5ckhnK1lBMHM0Y0JldjZ2UlQwODkyYUpHYmUKZnFRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.0.180:56016 + name: kind-member1 
+contexts: +- context: + cluster: kind-member1 + user: kind-member1 + name: member1 +current-context: member1 +kind: Config +preferences: {} +users: +- name: kind-member1 + user: + ## cert will expire on 2124-07-28T02:18:16Z + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURiVENDQWxXZ0F3SUJBZ0lVUWtJUUljYlBuQXFJOHVjZFg1NGsxUWhselE0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd05ERVpNQmNHQTFVRUF3d1FhM1ZpWlhKdVpYUmxjeTFoWkcxcGJqRVhNQlVHQTFVRUNnd09jM2x6ZEdWdApPbTFoYzNSbGNuTXdJQmNOTWpRd09ESXhNREl4T0RFMldoZ1BNakV5TkRBM01qZ3dNakU0TVRaYU1EUXhHVEFYCkJnTlZCQU1NRUd0MVltVnlibVYwWlhNdFlXUnRhVzR4RnpBVkJnTlZCQW9NRG5ONWMzUmxiVHB0WVhOMFpYSnoKTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFxVldnZ1JEWldOdkNnTkVNb25jYgpBU04zUk5wWmZBK2NLNHZOQy9EMlI5bTVBVFIvWkd0MDZhUDN6V0xrcWM0M01td3NMY0ZEeS93WStoQjUwL0l0ClpxanQ1RWFJbDFqcVpuUlh1RVhlNXBocS9mWklDU00ydkw5dHQwSlg5TDljNUxlZFNXSndTWjhndmpwd1FhY0sKU0dNeStITTVsQzVUYTNiT1I5OHNURXlGRzZaOGtYOUtUMkhnWXN2ZVNoTzI0MlRSVVNKUEtXK3hvY0pqeHFMKwpHRkhLb1pwNEQreVlrWjJkYWhIdlBpU0N4ZTlXRFhLYnBaUlBUeE5iL0VNa0o2WU91VThOMlFXNDR1OUx4MXk3CmpJQVBMNnZHY1VKZW8yVWh2eVNoR0t6UHJJMXRHU1duakt4S0NPdjhySzVOUHVoSVhUWERIaFRDREI0L3I2eHQKeFFJREFRQUJvM1V3Y3pBZEJnTlZIUTRFRmdRVTN2cnA0SFRMcUFQUkJxVmJvOUNWNTNLamxRa3dId1lEVlIwagpCQmd3Rm9BVTN2cnA0SFRMcUFQUkJxVmJvOUNWNTNLamxRa3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUdBMVVkCkpRUU1NQW9HQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUIKQUR5NlZncXN0aUdYVHhWMFZ6eVh0SHFWa0hYMkdObDU4SFl3Qzh0aTV1eS9UN1UvV2h6T21qRE14b1pvbkErbQp3ekUyNURwMUo3RE4yeTAyc2tIb1lNUDh1MGZzREJBQ1d0WHhGaHdVamErRGUxQ2lFWkNHaERVZU1OUzNrYTJqCjR6OU93M3lDaGFuSlhtUjZuOTFoQTVUR0o0dWs5ZUZyUWdLb0xxWi9wb1Jhb3hqNjg2MVhLV2lKUzFXdnJ6MWcKZm1iU2pWSW40UUZBOWY2MTFpd1Mvd0dOSEoxZExVemE5V3VpUWVPamN1bENxeHFCbDQra1FXbVJCY21Pa3NlMgorS3VaSk1JTUpmUzI1MjFBWk8zNUVnWGJsQTJCRzFUZ1paejZpM0UwTk1qemkrVDFOSU13WGF3aVdYRHQ0Vy9sCnVtdWJ3OWFxTi9tNU5VYTNoWjZYbVhRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + client-key-data: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ3BWYUNCRU5sWTI4S0EKMFF5aWR4c0JJM2RFMmxsOEQ1d3JpODBMOFBaSDJia0JOSDlrYTNUcG8vZk5ZdVNwempjeWJDd3R3VVBML0JqNgpFSG5UOGkxbXFPM2tSb2lYV09wbWRGZTRSZDdtbUdyOTlrZ0pJemE4djIyM1FsZjB2MXprdDUxSlluQkpueUMrCk9uQkJwd3BJWXpMNGN6bVVMbE5yZHM1SDN5eE1USVVicG55UmYwcFBZZUJpeTk1S0U3YmpaTkZSSWs4cGI3R2gKd21QR292NFlVY3FobW5nUDdKaVJuWjFxRWU4K0pJTEY3MVlOY3B1bGxFOVBFMXY4UXlRbnBnNjVUdzNaQmJqaQo3MHZIWEx1TWdBOHZxOFp4UWw2alpTRy9KS0VZck0rc2pXMFpKYWVNckVvSTYveXNyazArNkVoZE5jTWVGTUlNCkhqK3ZyRzNGQWdNQkFBRUNnZ0VBSTdLaGU1UUp2ZW5XUDBIUzRBMHI3RG1GMDBZVXgwcWpLYXIzTnlVOVJqaG8KQUJFSktpcGRJMFFsNFc2UHRoeDdGbTRuZ2gzVUpSU29UMDlaMzR5V2RhWDNRTUI5MnlvcmdCM1d3RW82aTNKbQpXOU9uckFWNGJLSU9oeXU5VHlOb2VlOGJnWFQzSnc0YzRQMkEzTlpTSEtDTkJrT0VSL0RjTlROK21UZzdKbnBDCnMvVmoyd2pibllQNmt6MVRTcEVjRksrb3NnYldXQ1AxVDFUeFRFN1k5VlBjbWhibzU5Lzdxc2EzaE8vUjgxRysKQ0VxU3U1emgrQmJvRFZHUStpZFV3OGtqUlhUS2MzWFBWb0R6SmR6cUtJVUYwTkc1Nm4wdGNoZkVEMUpWS09PSQp5a3REdjM1Qi9JWEkwYzk1UHpJN0crOWJvSHM1aW9BcUZKUlo2bllJQVFLQmdRRHNvTmJRc0p1U1pVcW9ZZTBLCkhERFpXS3F5NUM3Q1IvOFNXTXcxeFQzdnJVV2t2Wm5TZUJHUnpvYUEvZmRXQ3lGWHdBd1ZkS0xFSzFkdW5UWDEKUkQ4Zk9odFBDdTdiaXVvV2l4YlpMcGRPUXVzZlhRcDNHUWtoOUNIQVRPc1pML0tkMmxSd0F6dHpGNTZkVHRtdQplZFIxVENiTEVZK1A1Vzg0MXhjV2Y4OEh3UUtCZ1FDM01uaXBXY0xWNDBlS2xZMEg2alhBMDByWCs3V1U1M21RClFKNXAzbWlxSW5qRWlmV2ZQZHh4Y0hyVWRndzlpRk9pVUVvNENIQnNnam5wRU5wUjVhcUJpTzRWUFBuRXdXM2EKSmJ5eWdmRW4wREdBci9PdFpTTUF6OGN4NFVnZmZpSmZPSk8rZXRNemhDMXlMcFAwR05oM2UyRzlDZ0M3eVhDSQpGT1BvdWlzSEJRS0JnSGROT0VFTGFjUkxrWEtIdk0wR0haTFhZMmpDSnRrSkY0OFdlZzc2SFJvRUVFTFkzUDhDClRrbG5DT1ZzSmhHWmx2djQ5WjZ6cVlTaUhYakZobmpjS2I4Q3V0WUZPeHd4VTRoK0k4em44cDBnbkE2NkNCYTMKNXFUWncxS0M5VjFEa1YwSXdOMmdvNDZKY0F6N3ZrQjdhQ1NqZWtPVDNQKzl1Mis2OGdjRDlVdUJBb0dBUjkxdgp3aGRwUEJpZG52ck55VllTWWlOQkQvczVIMEd5eVdqZisrMzRwdzFBelBERnZ3TTRiL1BNNjMybmpaZm1IeDFhCkVDTVhYeW15NS8vcGRRa2dXeEpKTzJHaEpaTXZzY3p0K2lUSlluSGtpWFA4cG4rdlBJbEZ2Z1ovRVlPY25qZ0cKbFVsL2dvME9ldVZVdXdQb0h1N3l4NEtlQ1F5YnJYWnNkWVphakxVQ2dZRUEyeEhLenJ3T1FwMlNNbXlzOFR1bgpZbm54bzMwV0diQzNpYVJIZkQ0TGRvNC9iNHY2TWtkMG9wdXBoMzArQjduRkZoRjlWSVplbVdGWkMrcXNKWHdLCmNwOGMzdjBNZFJkeUtTcTJ4Ly9ZL2s1KytaeU5DSXRZdzdhRy91Wks4TlZyY3h0c2hXOXBobDFJTThXTjBUYmgKNmxwd2xWZGFha0RPY09ZNjE2clRGOXM9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0=`)}, + }, + threshold: 0, + signer: false, + wantErr: false, + }, + { + name: "update secret failed", + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "secret"}, + Data: map[string][]byte{"karmada-kubeconfig": []byte(`apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvakNDQWVhZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJME1EZ3hOVEEyTVRZd01Gb1hEVE0wTURneE16QTJNVFl3TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTndZCjU3blNJNDgwMUZIYmtYVUhpUldTTmV3UUxRTTZQbTB5YXArd1JXR2J3emU3US9rbjl0L2xBUWwxdW1aa2ZRalUKVHgyZHV6cFpXQkRndnAreTVBNndaUyt2VTVhSFY4dE1QRi9ocHRVczB1VW11YmQ2OEs4ZnNuREd6bnJwKzdpQwo3R2VyVzB2NDNTdnpqT0dibDQ2Nlp5cXFPRmt5VVhPQ1pVWFJMbWkyMVNrbS9iU2RFS3FDZXBtRDFNSEUwVyttCkJOOXBQeFJOU1dCZGNkSFVqR29odUUrUVBJQXlDWEtBdlNlWDBOZDd6Q1Ayd1dFRE5aSmxRS0REUnFUUHdDS3QKMW9TaDdEeWhvQ0l6clBtNENIcVNHSEJCNnVORmNEZjdpNGhVY09SdW5JMHlVUEsya2FDUmdqTkZKYkJLL29SNApoSFl0SFJwUkN3b244Q3A4dWRFQ0F3RUFBYU5aTUZjd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZESUZTYXhZNDc1WlZaTlp3dGdwOU1yeFBrU2ZNQlVHQTFVZEVRUU8KTUF5Q0NtdDFZbVZ5Ym1WMFpYTXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUJBSnY3Ymw1L2plVlFZZkxWblByKwpBelVYNmxXdXNHajQ0b09ma2xtZGZTTmxLU3ZGUjVRMi9rWkVQZXZnU3NzZVdIWnVucmZTYkIwVDdWYjdkUkNQCjVRMTk4aUNnZDFwNm0wdXdOVGpSRi85MHhzYUdVOHBQTFQxeHlrMDlCVDc0WVVENnNtOHFVSWVrWFU0U3hlU2oKWjk3VU13azVoZndXUWpqTFc1UklwNW1qZjR2aU1uWXB6SDB4bDREV3Jka1AxbTFCdkZvWmhFMEVaKzlWcGNPYwprNTN4ZkxUR3A2S1UrQ0w4RU5teXFCeTJNcVBXdjRQKzVTZ0hldlY3Ym1WdktuMkx0blExTHdCcDdsdldYb1JRCmUzQm83d3hnSUU0Rnl0VUU4enRaS2ZJSDZPY3VzNWJGY283cGw5ckhnK1lBMHM0Y0JldjZ2UlQwODkyYUpHYmUKZnFRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://192.168.0.180:56016 + name: kind-member1 +contexts: +- context: + cluster: kind-member1 + user: kind-member1 + name: member1 +current-context: member1 +kind: Config +preferences: {} +users: +- name: kind-member1 + user: + ## cert will expire on 2124-07-28T02:18:16Z + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURiVENDQWxXZ0F3SUJBZ0lVUWtJUUljYlBuQXFJOHVjZFg1NGsxUWhselE0d0RRWUpLb1pJaHZjTkFRRUwKQlFBd05ERVpNQmNHQTFVRUF3d1FhM1ZpWlhKdVpYUmxjeTFoWkcxcGJqRVhNQlVHQTFVRUNnd09jM2x6ZEdWdApPbTFoYzNSbGNuTXdJQmNOTWpRd09ESXhNREl4T0RFMldoZ1BNakV5TkRBM01qZ3dNakU0TVRaYU1EUXhHVEFYCkJnTlZCQU1NRUd0MVltVnlibVYwWlhNdFlXUnRhVzR4RnpBVkJnTlZCQW9NRG5ONWMzUmxiVHB0WVhOMFpYSnoKTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFxVldnZ1JEWldOdkNnTkVNb25jYgpBU04zUk5wWmZBK2NLNHZOQy9EMlI5bTVBVFIvWkd0MDZhUDN6V0xrcWM0M01td3NMY0ZEeS93WStoQjUwL0l0ClpxanQ1RWFJbDFqcVpuUlh1RVhlNXBocS9mWklDU00ydkw5dHQwSlg5TDljNUxlZFNXSndTWjhndmpwd1FhY0sKU0dNeStITTVsQzVUYTNiT1I5OHNURXlGRzZaOGtYOUtUMkhnWXN2ZVNoTzI0MlRSVVNKUEtXK3hvY0pqeHFMKwpHRkhLb1pwNEQreVlrWjJkYWhIdlBpU0N4ZTlXRFhLYnBaUlBUeE5iL0VNa0o2WU91VThOMlFXNDR1OUx4MXk3CmpJQVBMNnZHY1VKZW8yVWh2eVNoR0t6UHJJMXRHU1duakt4S0NPdjhySzVOUHVoSVhUWERIaFRDREI0L3I2eHQKeFFJREFRQUJvM1V3Y3pBZEJnTlZIUTRFRmdRVTN2cnA0SFRMcUFQUkJxVmJvOUNWNTNLamxRa3dId1lEVlIwagpCQmd3Rm9BVTN2cnA0SFRMcUFQUkJxVmJvOUNWNTNLamxRa3dEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUdBMVVkCkpRUU1NQW9HQ0NzR0FRVUZCd01DTUF3R0ExVWRFd0VCL3dRQ01BQXdEUVlKS29aSWh2Y05BUUVMQlFBRGdnRUIKQUR5NlZncXN0aUdYVHhWMFZ6eVh0SHFWa0hYMkdObDU4SFl3Qzh0aTV1eS9UN1UvV2h6T21qRE14b1pvbkErbQp3ekUyNURwMUo3RE4yeTAyc2tIb1lNUDh1MGZzREJBQ1d0WHhGaHdVamErRGUxQ2lFWkNHaERVZU1OUzNrYTJqCjR6OU93M3lDaGFuSlhtUjZuOTFoQTVUR0o0dWs5ZUZyUWdLb0xxWi9wb1Jhb3hqNjg2MVhLV2lKUzFXdnJ6MWcKZm1iU2pWSW40UUZBOWY2MTFpd1Mvd0dOSEoxZExVemE5V3VpUWVPamN1bENxeHFCbDQra1FXbVJCY21Pa3NlMgorS3VaSk1JTUpmUzI1MjFBWk8zNUVnWGJsQTJCRzFUZ1paejZpM0UwTk1qemkrVDFOSU13WGF3aVdYRHQ0Vy9sCnVtdWJ3OWFxTi9tNU5VYTNoWjZYbVhRPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + client-key-data: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2UUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktjd2dnU2pBZ0VBQW9JQkFRQ3BWYUNCRU5sWTI4S0EKMFF5aWR4c0JJM2RFMmxsOEQ1d3JpODBMOFBaSDJia0JOSDlrYTNUcG8vZk5ZdVNwempjeWJDd3R3VVBML0JqNgpFSG5UOGkxbXFPM2tSb2lYV09wbWRGZTRSZDdtbUdyOTlrZ0pJemE4djIyM1FsZjB2MXprdDUxSlluQkpueUMrCk9uQkJwd3BJWXpMNGN6bVVMbE5yZHM1SDN5eE1USVVicG55UmYwcFBZZUJpeTk1S0U3YmpaTkZSSWs4cGI3R2gKd21QR292NFlVY3FobW5nUDdKaVJuWjFxRWU4K0pJTEY3MVlOY3B1bGxFOVBFMXY4UXlRbnBnNjVUdzNaQmJqaQo3MHZIWEx1TWdBOHZxOFp4UWw2alpTRy9KS0VZck0rc2pXMFpKYWVNckVvSTYveXNyazArNkVoZE5jTWVGTUlNCkhqK3ZyRzNGQWdNQkFBRUNnZ0VBSTdLaGU1UUp2ZW5XUDBIUzRBMHI3RG1GMDBZVXgwcWpLYXIzTnlVOVJqaG8KQUJFSktpcGRJMFFsNFc2UHRoeDdGbTRuZ2gzVUpSU29UMDlaMzR5V2RhWDNRTUI5MnlvcmdCM1d3RW82aTNKbQpXOU9uckFWNGJLSU9oeXU5VHlOb2VlOGJnWFQzSnc0YzRQMkEzTlpTSEtDTkJrT0VSL0RjTlROK21UZzdKbnBDCnMvVmoyd2pibllQNmt6MVRTcEVjRksrb3NnYldXQ1AxVDFUeFRFN1k5VlBjbWhibzU5Lzdxc2EzaE8vUjgxRysKQ0VxU3U1emgrQmJvRFZHUStpZFV3OGtqUlhUS2MzWFBWb0R6SmR6cUtJVUYwTkc1Nm4wdGNoZkVEMUpWS09PSQp5a3REdjM1Qi9JWEkwYzk1UHpJN0crOWJvSHM1aW9BcUZKUlo2bllJQVFLQmdRRHNvTmJRc0p1U1pVcW9ZZTBLCkhERFpXS3F5NUM3Q1IvOFNXTXcxeFQzdnJVV2t2Wm5TZUJHUnpvYUEvZmRXQ3lGWHdBd1ZkS0xFSzFkdW5UWDEKUkQ4Zk9odFBDdTdiaXVvV2l4YlpMcGRPUXVzZlhRcDNHUWtoOUNIQVRPc1pML0tkMmxSd0F6dHpGNTZkVHRtdQplZFIxVENiTEVZK1A1Vzg0MXhjV2Y4OEh3UUtCZ1FDM01uaXBXY0xWNDBlS2xZMEg2alhBMDByWCs3V1U1M21RClFKNXAzbWlxSW5qRWlmV2ZQZHh4Y0hyVWRndzlpRk9pVUVvNENIQnNnam5wRU5wUjVhcUJpTzRWUFBuRXdXM2EKSmJ5eWdmRW4wREdBci9PdFpTTUF6OGN4NFVnZmZpSmZPSk8rZXRNemhDMXlMcFAwR05oM2UyRzlDZ0M3eVhDSQpGT1BvdWlzSEJRS0JnSGROT0VFTGFjUkxrWEtIdk0wR0haTFhZMmpDSnRrSkY0OFdlZzc2SFJvRUVFTFkzUDhDClRrbG5DT1ZzSmhHWmx2djQ5WjZ6cVlTaUhYakZobmpjS2I4Q3V0WUZPeHd4VTRoK0k4em44cDBnbkE2NkNCYTMKNXFUWncxS0M5VjFEa1YwSXdOMmdvNDZKY0F6N3ZrQjdhQ1NqZWtPVDNQKzl1Mis2OGdjRDlVdUJBb0dBUjkxdgp3aGRwUEJpZG52ck55VllTWWlOQkQvczVIMEd5eVdqZisrMzRwdzFBelBERnZ3TTRiL1BNNjMybmpaZm1IeDFhCkVDTVhYeW15NS8vcGRRa2dXeEpKTzJHaEpaTXZzY3p0K2lUSlluSGtpWFA4cG4rdlBJbEZ2Z1ovRVlPY25qZ0cKbFVsL2dvME9ldVZVdXdQb0h1N3l4NEtlQ1F5YnJYWnNkWVphakxVQ2dZRUEyeEhLenJ3T1FwMlNNbXlzOFR1bgpZbm54bzMwV0diQzNpYVJIZkQ0TGRvNC9iNHY2TWtkMG9wdXBoMzArQjduRkZoRjlWSVplbVdGWkMrcXNKWHdLCmNwOGMzdjBNZFJkeUtTcTJ4Ly9ZL2s1KytaeU5DSXRZdzdhRy91Wks4TlZyY3h0c2hXOXBobDFJTThXTjBUYmgKNmxwd2xWZGFha0RPY09ZNjE2clRGOXM9Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0=`)},
+			},
+			threshold: 1,
+			signer: true,
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := makeFakeCertRotationController(tt.threshold)
+			cc, err := util.NewClusterClientSet("test", c.Client, &util.ClientOption{})
+			if err != nil {
+				t.Fatal(err)
+			}
+			c.ClusterClient = cc
+
+			if tt.signer {
+				go mockCSRSigner(c.KubeClient)
+			}
+
+			if err := c.syncCertRotation(context.Background(), tt.secret); (err != nil) != tt.wantErr {
+				t.Errorf("CertRotationController.syncCertRotation() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func mockCSRSigner(cs kubernetes.Interface) {
+	// Create the timeout channel once, outside the loop; a time.After placed
+	// directly in the select would be re-created on every iteration and the
+	// timeout case would never fire.
+	timeout := time.After(10 * time.Second)
+	for {
+		select {
+		case <-timeout:
+			return
+		default:
+			csrL, _ := cs.CertificatesV1().CertificateSigningRequests().List(context.Background(), metav1.ListOptions{})
+			for _, csr := range csrL.Items {
+				t := csr.DeepCopy()
+				t.Status.Certificate = []byte(testCA)
+				if _, err := cs.CertificatesV1().CertificateSigningRequests().UpdateStatus(context.Background(), t, metav1.UpdateOptions{}); err != nil {
+					return
+				}
+			}
+		}
+	}
+}
+
+func Test_getCertValidityPeriod(t *testing.T) {
+	now := time.Now()
+	tests := []struct {
+		name string
+		certTime []map[string]time.Time
+		wantBefore time.Time
+		wantAfter time.Time
+
wantErr bool + }{ + { + name: "get cert validity period success", + certTime: []map[string]time.Time{ + { + "notBefore": now.Add(-36 * time.Hour).UTC().Truncate(time.Second), + "notAfter": now.Add(72 * time.Hour).UTC().Truncate(time.Second), + }, + { + "notBefore": now.Add(-24 * time.Hour).UTC().Truncate(time.Second), + "notAfter": now.Add(36 * time.Hour).UTC().Truncate(time.Second), + }, + }, + wantBefore: now.Add(-24 * time.Hour).UTC().Truncate(time.Second), + wantAfter: now.Add(36 * time.Hour).UTC().Truncate(time.Second), + wantErr: false, + }, + { + name: "parse cert fail", + certTime: []map[string]time.Time{}, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + certData := []byte{} + for _, ct := range tt.certTime { + cert, err := newMockCert(ct["notBefore"], ct["notAfter"]) + if err != nil { + t.Fatal(err) + } + certData = append(certData, pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})...) + } + + notBefore, notAfter, err := getCertValidityPeriod(certData) + if tt.wantErr { + if err == nil { + t.Error("expected error but got nil") + } + return + } + + if err != nil { + t.Errorf("unexpected error: %v", err) + return + } + + if !tt.wantBefore.Equal(*notBefore) || !tt.wantAfter.Equal(*notAfter) { + t.Errorf("got notBefore=%s, notAfter=%s; want notBefore=%s, notAfter=%s", notBefore, notAfter, tt.wantBefore, tt.wantAfter) + } + }) + } +} + +func newMockCert(notBefore, notAfter time.Time) (*x509.Certificate, error) { + serialNumber, err := rand.Int(rand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + return nil, err + } + + testSigner, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: "karmada.com", + Organization: []string{"karmada"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + certData, err := x509.CreateCertificate(rand.Reader, &template, &template, &testSigner.PublicKey, testSigner) + if err != nil { + return nil, err + } + + return x509.ParseCertificate(certData) +} diff --git a/pkg/controllers/cluster/cluster_controller.go b/pkg/controllers/cluster/cluster_controller.go index 048f47f749e4..0c0b4bdf6f2f 100644 --- a/pkg/controllers/cluster/cluster_controller.go +++ b/pkg/controllers/cluster/cluster_controller.go @@ -51,7 +51,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "cluster-controller" // MonitorRetrySleepTime is the amount of time the cluster controller that should // sleep between retrying cluster health updates. 
@@ -215,7 +215,7 @@ func (c *Controller) Start(ctx context.Context) error { func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error { c.clusterHealthMap = newClusterHealthMap() return utilerrors.NewAggregate([]error{ - controllerruntime.NewControllerManagedBy(mgr).For(&clusterv1alpha1.Cluster{}).Complete(c), + controllerruntime.NewControllerManagedBy(mgr).Named(ControllerName).For(&clusterv1alpha1.Cluster{}).Complete(c), mgr.Add(c), }) } diff --git a/pkg/controllers/cluster/cluster_controller_test.go b/pkg/controllers/cluster/cluster_controller_test.go new file mode 100644 index 000000000000..000725e9f168 --- /dev/null +++ b/pkg/controllers/cluster/cluster_controller_test.go @@ -0,0 +1,467 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cluster + +import ( + "context" + "reflect" + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/gclient" + "github.com/karmada-io/karmada/pkg/util/names" +) + +func newClusterController() *Controller { + rbIndexerFunc := func(obj client.Object) []string { + rb, ok := obj.(*workv1alpha2.ResourceBinding) + if !ok { + return nil + } + return util.GetBindingClusterNames(&rb.Spec) + } + + crbIndexerFunc := func(obj client.Object) []string { + crb, ok := obj.(*workv1alpha2.ClusterResourceBinding) + if !ok { + return nil + } + return util.GetBindingClusterNames(&crb.Spec) + } + client := fake.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithIndex(&workv1alpha2.ResourceBinding{}, rbClusterKeyIndex, rbIndexerFunc). + WithIndex(&workv1alpha2.ClusterResourceBinding{}, crbClusterKeyIndex, crbIndexerFunc). 
+ WithStatusSubresource(&clusterv1alpha1.Cluster{}).Build() + return &Controller{ + Client: client, + EventRecorder: record.NewFakeRecorder(1024), + clusterHealthMap: newClusterHealthMap(), + EnableTaintManager: true, + ClusterMonitorGracePeriod: 40 * time.Second, + } +} + +func TestController_Reconcile(t *testing.T) { + req := controllerruntime.Request{NamespacedName: types.NamespacedName{Name: "test-cluster"}} + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + ns *corev1.Namespace + work *workv1alpha1.Work + del bool + wCluster *clusterv1alpha1.Cluster + want controllerruntime.Result + wantErr bool + }{ + { + name: "cluster without status", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + }, + wCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{{ + Key: clusterv1alpha1.TaintClusterNotReady, + Effect: corev1.TaintEffectNoSchedule, + }}, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{}, + }, + }, + want: controllerruntime.Result{}, + wantErr: false, + }, + { + name: "cluster with ready condition", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{{ + Key: clusterv1alpha1.TaintClusterNotReady, + Effect: corev1.TaintEffectNoSchedule, + }}, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + wCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{Taints: []corev1.Taint{}}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + want: controllerruntime.Result{}, + wantErr: false, + }, + { + name: "cluster with unknown condition", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{{ + Key: clusterv1alpha1.TaintClusterNotReady, + Effect: corev1.TaintEffectNoSchedule, + }}, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionUnknown, + }, + }, + }, + }, + wCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{Taints: []corev1.Taint{ + { + Key: clusterv1alpha1.TaintClusterUnreachable, + Effect: corev1.TaintEffectNoSchedule, + }, + }}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionUnknown, + }, + }, + }, + }, + want: controllerruntime.Result{}, + wantErr: false, + }, + { + name: "cluster with false condition", + cluster: &clusterv1alpha1.Cluster{ + 
ObjectMeta: controllerruntime.ObjectMeta{
+					Name: "test-cluster",
+					Finalizers: []string{util.ClusterControllerFinalizer},
+				},
+				Spec: clusterv1alpha1.ClusterSpec{
+					Taints: []corev1.Taint{{
+						Key: clusterv1alpha1.TaintClusterUnreachable,
+						Effect: corev1.TaintEffectNoSchedule,
+					}},
+				},
+				Status: clusterv1alpha1.ClusterStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type: clusterv1alpha1.ClusterConditionReady,
+							Status: metav1.ConditionFalse,
+						},
+					},
+				},
+			},
+			wCluster: &clusterv1alpha1.Cluster{
+				ObjectMeta: controllerruntime.ObjectMeta{
+					Name: "test-cluster",
+					Finalizers: []string{util.ClusterControllerFinalizer},
+				},
+				Spec: clusterv1alpha1.ClusterSpec{Taints: []corev1.Taint{
+					{
+						Key: clusterv1alpha1.TaintClusterNotReady,
+						Effect: corev1.TaintEffectNoSchedule,
+					},
+				}},
+				Status: clusterv1alpha1.ClusterStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type: clusterv1alpha1.ClusterConditionReady,
+							Status: metav1.ConditionFalse,
+						},
+					},
+				},
+			},
+			want: controllerruntime.Result{},
+			wantErr: false,
+		},
+		{
+			name: "cluster not found",
+			cluster: &clusterv1alpha1.Cluster{
+				ObjectMeta: controllerruntime.ObjectMeta{
+					Name: "test-cluster-noexist",
+					Finalizers: []string{util.ClusterControllerFinalizer},
+				},
+			},
+			want: controllerruntime.Result{},
+			wantErr: false,
+		},
+		{
+			name: "remove cluster failed",
+			cluster: &clusterv1alpha1.Cluster{
+				ObjectMeta: controllerruntime.ObjectMeta{
+					Name: "test-cluster",
+					Finalizers: []string{util.ClusterControllerFinalizer},
+				},
+				Spec: clusterv1alpha1.ClusterSpec{
+					SyncMode: clusterv1alpha1.Pull,
+				},
+				Status: clusterv1alpha1.ClusterStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type: clusterv1alpha1.ClusterConditionReady,
+							Status: metav1.ConditionFalse,
+						},
+					},
+				},
+			},
+			ns: &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: names.GenerateExecutionSpaceName("test-cluster"),
+				},
+			},
+			work: &workv1alpha1.Work{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-work",
+					Namespace: names.GenerateExecutionSpaceName("test-cluster"),
+					Finalizers: []string{util.ExecutionControllerFinalizer},
+				},
+			},
+			del: true,
+			want: controllerruntime.Result{},
+			wantErr: true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			c := newClusterController()
+			if tt.cluster != nil {
+				if err := c.Create(context.Background(), tt.cluster, &client.CreateOptions{}); err != nil {
+					t.Fatalf("failed to create cluster: %v", err)
+				}
+			}
+
+			if tt.ns != nil {
+				if err := c.Create(context.Background(), tt.ns, &client.CreateOptions{}); err != nil {
+					t.Fatalf("failed to create ns: %v", err)
+				}
+			}
+
+			if tt.work != nil {
+				if err := c.Create(context.Background(), tt.work, &client.CreateOptions{}); err != nil {
+					t.Fatalf("failed to create work: %v", err)
+				}
+			}
+
+			if tt.del {
+				if err := c.Delete(context.Background(), tt.cluster, &client.DeleteOptions{}); err != nil {
+					t.Fatalf("failed to delete cluster: %v", err)
+				}
+			}
+
+			got, err := c.Reconcile(context.Background(), req)
+			if (err != nil) != tt.wantErr {
+				t.Errorf("Controller.Reconcile() error = %v, wantErr %v", err, tt.wantErr)
+				return
+			}
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("Controller.Reconcile() = %v, want %v", got, tt.want)
+				return
+			}
+
+			if tt.wCluster != nil {
+				cluster := &clusterv1alpha1.Cluster{}
+				if err := c.Get(context.Background(), types.NamespacedName{Name: tt.cluster.Name}, cluster, &client.GetOptions{}); err != nil {
+					t.Errorf("failed to get cluster: %v", err)
+					return
+				}
+
+				cleanUpCluster(cluster)
+				if
!reflect.DeepEqual(cluster, tt.wCluster) { + t.Errorf("Cluster resource reconcile get %v, want %v", *cluster, *tt.wCluster) + } + } + }) + } +} + +func TestController_monitorClusterHealth(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + wCluster *clusterv1alpha1.Cluster + wantErr bool + }{ + { + name: "cluster without status", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Pull, + }, + }, + wCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Pull, + Taints: []corev1.Taint{ + { + Key: clusterv1alpha1.TaintClusterUnreachable, + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{{ + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionUnknown, + Reason: "ClusterStatusNeverUpdated", + Message: "Cluster status controller never posted cluster status.", + }}, + }, + }, + wantErr: false, + }, + { + name: "cluster with ready condition", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Pull, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + wCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: controllerruntime.ObjectMeta{ + Name: "test-cluster", + Finalizers: []string{util.ClusterControllerFinalizer}, + }, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Pull, + Taints: []corev1.Taint{}, + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := newClusterController() + if tt.cluster != nil { + if err := c.Create(context.Background(), tt.cluster, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create cluster: %v", err) + } + } + + if err := c.monitorClusterHealth(context.Background()); (err != nil) != tt.wantErr { + t.Errorf("Controller.monitorClusterHealth() error = %v, wantErr %v", err, tt.wantErr) + return + } + + cluster := &clusterv1alpha1.Cluster{} + if err := c.Get(context.Background(), types.NamespacedName{Name: "test-cluster"}, cluster, &client.GetOptions{}); err != nil { + t.Errorf("failed to get cluster: %v", err) + return + } + + cleanUpCluster(cluster) + if !reflect.DeepEqual(cluster, tt.wCluster) { + t.Errorf("Cluster resource get %+v, want %+v", *cluster, *tt.wCluster) + return + } + }) + } +} + +// cleanUpCluster removes unnecessary fields from Cluster resource for testing purposes. 
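+// It clears the ResourceVersion, the TimeAdded of each taint, and the
+// LastTransitionTime of each condition, all of which are set at runtime and
+// would otherwise break the reflect.DeepEqual comparisons against the
+// expected fixtures in the tests above.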
+func cleanUpCluster(c *clusterv1alpha1.Cluster) { + c.ObjectMeta.ResourceVersion = "" + + taints := []corev1.Taint{} + for _, taint := range c.Spec.Taints { + taint.TimeAdded = nil + taints = append(taints, taint) + } + c.Spec.Taints = taints + + cond := []metav1.Condition{} + for _, condition := range c.Status.Conditions { + condition.LastTransitionTime = metav1.Time{} + cond = append(cond, condition) + } + c.Status.Conditions = cond +} diff --git a/pkg/controllers/cluster/taint_manager.go b/pkg/controllers/cluster/taint_manager.go index 1e8bcdee5f79..b1f3f459426c 100644 --- a/pkg/controllers/cluster/taint_manager.go +++ b/pkg/controllers/cluster/taint_manager.go @@ -39,7 +39,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// TaintManagerName is the controller name that will be used for taint management. +// TaintManagerName is the controller name that will be used when reporting events and metrics. const TaintManagerName = "taint-manager" // NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing objects @@ -291,7 +291,7 @@ func (tc *NoExecuteTaintManager) needEviction(clusterName string, annotations ma // SetupWithManager creates a controller and register to controller manager. func (tc *NoExecuteTaintManager) SetupWithManager(mgr controllerruntime.Manager) error { return utilerrors.NewAggregate([]error{ - controllerruntime.NewControllerManagedBy(mgr).For(&clusterv1alpha1.Cluster{}).Complete(tc), + controllerruntime.NewControllerManagedBy(mgr).Named(TaintManagerName).For(&clusterv1alpha1.Cluster{}).Complete(tc), mgr.Add(tc), }) } diff --git a/pkg/controllers/cluster/taint_manager_test.go b/pkg/controllers/cluster/taint_manager_test.go new file mode 100644 index 000000000000..efac5255ef0a --- /dev/null +++ b/pkg/controllers/cluster/taint_manager_test.go @@ -0,0 +1,533 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cluster + +import ( + "context" + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/fedinformer/keys" + "github.com/karmada-io/karmada/pkg/util/gclient" +) + +func newNoExecuteTaintManager() *NoExecuteTaintManager { + rbIndexerFunc := func(obj client.Object) []string { + rb, ok := obj.(*workv1alpha2.ResourceBinding) + if !ok { + return nil + } + return util.GetBindingClusterNames(&rb.Spec) + } + + crbIndexerFunc := func(obj client.Object) []string { + crb, ok := obj.(*workv1alpha2.ClusterResourceBinding) + if !ok { + return nil + } + return util.GetBindingClusterNames(&crb.Spec) + } + + mgr := &NoExecuteTaintManager{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithIndex(&workv1alpha2.ResourceBinding{}, rbClusterKeyIndex, rbIndexerFunc). + WithIndex(&workv1alpha2.ClusterResourceBinding{}, crbClusterKeyIndex, crbIndexerFunc).Build(), + } + bindingEvictionWorkerOptions := util.Options{ + Name: "binding-eviction", + KeyFunc: nil, + ReconcileFunc: mgr.syncBindingEviction, + } + mgr.bindingEvictionWorker = util.NewAsyncWorker(bindingEvictionWorkerOptions) + + clusterBindingEvictionWorkerOptions := util.Options{ + Name: "cluster-binding-eviction", + KeyFunc: nil, + ReconcileFunc: mgr.syncClusterBindingEviction, + } + mgr.clusterBindingEvictionWorker = util.NewAsyncWorker(clusterBindingEvictionWorkerOptions) + return mgr +} + +func TestNoExecuteTaintManager_Reconcile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + want reconcile.Result + wantErr bool + }{ + { + name: "no taints", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{}, + }, + want: reconcile.Result{}, + wantErr: false, + }, + { + name: "have taints", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "test-taint", + Value: "test-value", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + }, + want: reconcile.Result{}, + wantErr: false, + }, + { + name: "cluster not found", + want: reconcile.Result{}, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := newNoExecuteTaintManager() + if err := tc.Client.Create(context.Background(), &workv1alpha2.ResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rb", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create rb, %v", err) + } + + if err := tc.Client.Create(context.Background(), &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + }, + Spec: 
workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create crb, %v", err) + } + + if tt.cluster != nil { + if err := tc.Client.Create(context.Background(), tt.cluster, &client.CreateOptions{}); err != nil { + t.Fatal(err) + return + } + } + + req := reconcile.Request{NamespacedName: types.NamespacedName{Name: "test-cluster"}} + got, err := tc.Reconcile(context.Background(), req) + if (err != nil) != tt.wantErr { + t.Errorf("NoExecuteTaintManager.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("NoExecuteTaintManager.Reconcile() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNoExecuteTaintManager_syncBindingEviction(t *testing.T) { + replica := int32(1) + tests := []struct { + name string + rb *workv1alpha2.ResourceBinding + cluster *clusterv1alpha1.Cluster + wrb *workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "rb without tolerations", + rb: &workv1alpha2.ResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rb", + Namespace: "default", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "cluster.karmada.io/not-ready", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + }, + wrb: &workv1alpha2.ResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rb", + Namespace: "default", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "test-cluster", + Replicas: &replica, + Reason: workv1alpha2.EvictionReasonTaintUntolerated, + Producer: workv1alpha2.EvictionProducerTaintManager, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "rb with tolerations", + rb: &workv1alpha2.ResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rb", + Namespace: "default", + Annotations: 
map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":30},{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "cluster.karmada.io/not-ready", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + }, + wrb: &workv1alpha2.ResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rb", + Namespace: "default", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "rb not exist", + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := newNoExecuteTaintManager() + if tt.rb != nil { + if err := tc.Create(context.Background(), tt.rb, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create rb: %v", err) + } + } + + if tt.cluster != nil { + if err := tc.Create(context.Background(), tt.cluster, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create cluster: %v", err) + } + } + + key := keys.FederatedKey{ + Cluster: "test-cluster", + ClusterWideKey: keys.ClusterWideKey{ + Kind: "ResourceBinding", + Name: "test-rb", + Namespace: "default", + }, + } + if err := tc.syncBindingEviction(key); (err != nil) != tt.wantErr { + t.Errorf("NoExecuteTaintManager.syncBindingEviction() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if tt.wrb != nil { + gRb := &workv1alpha2.ResourceBinding{} + if err := tc.Get(context.Background(), types.NamespacedName{Name: tt.wrb.Name, Namespace: tt.wrb.Namespace}, gRb, &client.GetOptions{}); err != nil { + t.Fatalf("failed to get rb, error %v", err) + } + + if !reflect.DeepEqual(tt.wrb.Spec, gRb.Spec) { + t.Errorf("ResourceBinding get %+v, want %+v", gRb.Spec, tt.wrb.Spec) + } + } + }) + } +} + +func TestNoExecuteTaintManager_syncClusterBindingEviction(t *testing.T) { + replica := int32(1) + tests := []struct { + name string + crb *workv1alpha2.ClusterResourceBinding + cluster *clusterv1alpha1.Cluster + wcrb *workv1alpha2.ClusterResourceBinding + wantErr bool + }{ + { + name: "crb without tolerations", + crb: &workv1alpha2.ClusterResourceBinding{ + 
TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "cluster.karmada.io/not-ready", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + }, + wcrb: &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "test-cluster", + Replicas: &replica, + Reason: workv1alpha2.EvictionReasonTaintUntolerated, + Producer: workv1alpha2.EvictionProducerTaintManager, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "crb with tolerations", + crb: &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":30},{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "cluster.karmada.io/not-ready", + Effect: corev1.TaintEffectNoExecute, + }, + }, + }, + }, + wcrb: &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: 
"work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "crb not exist", + wantErr: false, + }, + { + name: "cluster not exist", + crb: &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{ + Kind: "ClusterResourceBinding", + APIVersion: "work.karmada.io/v1alpha2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-crb", + Annotations: map[string]string{"policy.karmada.io/applied-placement": `{"clusterAffinity":{"clusterNames":["member1","member2"]},"clusterTolerations":[{"key":"cluster.karmada.io/not-ready","operator":"Exists","effect":"NoExecute","tolerationSeconds":30},{"key":"cluster.karmada.io/unreachable","operator":"Exists","effect":"NoExecute","tolerationSeconds":30}],"replicaScheduling":{"replicaSchedulingType":"Divided","replicaDivisionPreference":"Weighted","weightPreference":{"staticWeightList":[{"targetCluster":{"clusterNames":["member1"]},"weight":1},{"targetCluster":{"clusterNames":["member2"]},"weight":1}]}}}`}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "test-cluster", + Replicas: 1, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := newNoExecuteTaintManager() + if tt.crb != nil { + if err := tc.Create(context.Background(), tt.crb, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create crb: %v", err) + } + } + + if tt.cluster != nil { + if err := tc.Create(context.Background(), tt.cluster, &client.CreateOptions{}); err != nil { + t.Fatalf("failed to create cluster: %v", err) + } + } + + key := keys.FederatedKey{ + Cluster: "test-cluster", + ClusterWideKey: keys.ClusterWideKey{ + Kind: "ClusterResourceBinding", + Name: "test-crb", + }, + } + if err := tc.syncClusterBindingEviction(key); (err != nil) != tt.wantErr { + t.Errorf("NoExecuteTaintManager.syncClusterBindingEviction() error = %v, wantErr %v", err, tt.wantErr) + } + + if tt.wcrb != nil { + gCRB := &workv1alpha2.ClusterResourceBinding{} + if err := tc.Get(context.Background(), types.NamespacedName{Name: tt.wcrb.Name}, gCRB, &client.GetOptions{}); err != nil { + t.Fatalf("failed to get rb, error %v", err) + } + + if !reflect.DeepEqual(tt.wcrb.Spec, gCRB.Spec) { + t.Errorf("ResourceBinding get %+v, want %+v", gCRB.Spec, tt.wcrb.Spec) + } + } + }) + } +} diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go index ea310559efbf..47300dcd4c00 100755 --- a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_controller.go @@ -39,7 +39,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. 
+ // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "cronfederatedhpa-controller" ) @@ -118,6 +118,7 @@ func (c *CronFHPAController) Reconcile(ctx context.Context, req controllerruntim func (c *CronFHPAController) SetupWithManager(mgr controllerruntime.Manager) error { c.CronHandler = NewCronHandler(mgr.GetClient(), mgr.GetEventRecorderFor(ControllerName)) return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&autoscalingv1alpha1.CronFederatedHPA{}). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). WithEventFilter(predicate.GenerationChangedPredicate{}). diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler_test.go b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler_test.go new file mode 100644 index 000000000000..11fc45f6551a --- /dev/null +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_handler_test.go @@ -0,0 +1,298 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronfederatedhpa + +import ( + "testing" + "time" + + "github.com/go-co-op/gocron" + "github.com/stretchr/testify/assert" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +func TestCronFHPAScaleTargetRefUpdates(t *testing.T) { + tests := []struct { + name string + cronFHPAKey string + initialTarget autoscalingv2.CrossVersionObjectReference + updatedTarget autoscalingv2.CrossVersionObjectReference + expectedUpdate bool + }{ + { + name: "New scale target", + cronFHPAKey: "default/new-cronhpa", + initialTarget: autoscalingv2.CrossVersionObjectReference{}, // Empty for new target + updatedTarget: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + expectedUpdate: false, + }, + { + name: "Same scale target", + cronFHPAKey: "default/test-cronhpa", + initialTarget: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + updatedTarget: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + expectedUpdate: false, + }, + { + name: "Different scale target", + cronFHPAKey: "default/test-cronhpa", + initialTarget: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + updatedTarget: autoscalingv2.CrossVersionObjectReference{ + Kind: "StatefulSet", + Name: "test-statefulset", + APIVersion: "apps/v1", + }, + expectedUpdate: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), 
record.NewFakeRecorder(100)) + + // Empty initialTarget will be skipped + if tt.initialTarget != (autoscalingv2.CrossVersionObjectReference{}) { + updated := handler.CronFHPAScaleTargetRefUpdates(tt.cronFHPAKey, tt.initialTarget) + assert.False(t, updated, "Initial target setting should return false") + } + + updated := handler.CronFHPAScaleTargetRefUpdates(tt.cronFHPAKey, tt.updatedTarget) + assert.Equal(t, tt.expectedUpdate, updated, "Unexpected result for %s", tt.name) + }) + } +} + +func TestAddCronExecutorIfNotExist(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPAKey := "default/test-cronhpa" + + // Adding new executor + handler.AddCronExecutorIfNotExist(cronFHPAKey) + assert.Contains(t, handler.cronExecutorMap, cronFHPAKey, "Executor should be added") + + // Adding existing executor + originalLen := len(handler.cronExecutorMap) + handler.AddCronExecutorIfNotExist(cronFHPAKey) + assert.Equal(t, originalLen, len(handler.cronExecutorMap), "Existing executor should not be added again") +} + +func TestRuleCronExecutorExists(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPAKey := "default/test-cronhpa" + ruleName := "test-rule" + + // non-existent executor + _, exists := handler.RuleCronExecutorExists(cronFHPAKey, ruleName) + assert.False(t, exists, "Non-existent executor should return false") + + // Add an executor + handler.AddCronExecutorIfNotExist(cronFHPAKey) + handler.cronExecutorMap[cronFHPAKey][ruleName] = RuleCron{ + CronFederatedHPARule: autoscalingv1alpha1.CronFederatedHPARule{ + Name: ruleName, + }, + } + + rule, exists := handler.RuleCronExecutorExists(cronFHPAKey, ruleName) + assert.True(t, exists, "Existing executor should return true") + assert.Equal(t, ruleName, rule.Name, "Returned rule should match the added rule") +} + +func TestStopRuleExecutor(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPAKey := "default/test-cronhpa" + ruleName := "test-rule" + + handler.cronExecutorMap = make(map[string]map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey][ruleName] = RuleCron{ + Scheduler: gocron.NewScheduler(time.UTC), + } + _, err := handler.cronExecutorMap[cronFHPAKey][ruleName].Scheduler.Every(1).Minute().Do(func() {}) + assert.NoError(t, err) + handler.cronExecutorMap[cronFHPAKey][ruleName].Scheduler.StartAsync() + + assert.True(t, handler.cronExecutorMap[cronFHPAKey][ruleName].Scheduler.IsRunning()) + + handler.StopRuleExecutor(cronFHPAKey, ruleName) + + assert.NotContains(t, handler.cronExecutorMap[cronFHPAKey], ruleName) +} + +func TestStopCronFHPAExecutor(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPAKey := "default/test-cronhpa" + ruleName1 := "test-rule-1" + ruleName2 := "test-rule-2" + + handler.cronExecutorMap = make(map[string]map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey][ruleName1] = RuleCron{ + Scheduler: gocron.NewScheduler(time.UTC), + } + handler.cronExecutorMap[cronFHPAKey][ruleName2] = RuleCron{ + Scheduler: gocron.NewScheduler(time.UTC), + } + + _, err := handler.cronExecutorMap[cronFHPAKey][ruleName1].Scheduler.Every(1).Minute().Do(func() {}) + assert.NoError(t, err) + + _, err = 
handler.cronExecutorMap[cronFHPAKey][ruleName2].Scheduler.Every(1).Minute().Do(func() {}) + assert.NoError(t, err) + + handler.cronExecutorMap[cronFHPAKey][ruleName1].Scheduler.StartAsync() + handler.cronExecutorMap[cronFHPAKey][ruleName2].Scheduler.StartAsync() + + assert.True(t, handler.cronExecutorMap[cronFHPAKey][ruleName1].Scheduler.IsRunning()) + assert.True(t, handler.cronExecutorMap[cronFHPAKey][ruleName2].Scheduler.IsRunning()) + + handler.StopCronFHPAExecutor(cronFHPAKey) + + assert.NotContains(t, handler.cronExecutorMap, cronFHPAKey) +} + +func TestCreateCronJobForExecutor(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cronhpa", + Namespace: "default", + }, + } + + tests := []struct { + name string + rule autoscalingv1alpha1.CronFederatedHPARule + timeZone *string + wantErr bool + }{ + { + name: "Valid rule without time zone", + rule: autoscalingv1alpha1.CronFederatedHPARule{ + Name: "test-rule", + Schedule: "*/5 * * * *", + }, + timeZone: nil, + wantErr: false, + }, + { + name: "Valid rule with valid time zone", + rule: autoscalingv1alpha1.CronFederatedHPARule{ + Name: "test-rule-tz", + Schedule: "*/5 * * * *", + }, + timeZone: ptr.To[string]("America/New_York"), + wantErr: false, + }, + { + name: "Valid rule with invalid time zone", + rule: autoscalingv1alpha1.CronFederatedHPARule{ + Name: "test-rule-invalid-tz", + Schedule: "*/5 * * * *", + }, + timeZone: ptr.To[string]("Invalid/TimeZone"), + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA) + handler.cronExecutorMap = make(map[string]map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron) + + tt.rule.TimeZone = tt.timeZone + err := handler.CreateCronJobForExecutor(cronFHPA, tt.rule) + + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Contains(t, handler.cronExecutorMap[cronFHPAKey], tt.rule.Name) + ruleCron := handler.cronExecutorMap[cronFHPAKey][tt.rule.Name] + assert.NotNil(t, ruleCron.Scheduler) + assert.True(t, ruleCron.Scheduler.IsRunning()) + } + + // Clean up + if !tt.wantErr { + handler.cronExecutorMap[cronFHPAKey][tt.rule.Name].Scheduler.Stop() + } + }) + } +} + +func TestGetRuleNextExecuteTime(t *testing.T) { + handler := NewCronHandler(fake.NewClientBuilder().Build(), record.NewFakeRecorder(100)) + + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cronhpa", + Namespace: "default", + }, + } + + rule := autoscalingv1alpha1.CronFederatedHPARule{ + Name: "test-rule", + Schedule: "*/5 * * * *", + } + + cronFHPAKey := helper.GetCronFederatedHPAKey(cronFHPA) + handler.cronExecutorMap = make(map[string]map[string]RuleCron) + handler.cronExecutorMap[cronFHPAKey] = make(map[string]RuleCron) + + err := handler.CreateCronJobForExecutor(cronFHPA, rule) + assert.NoError(t, err) + + nextTime, err := handler.GetRuleNextExecuteTime(cronFHPA, rule.Name) + assert.NoError(t, err) + assert.False(t, nextTime.IsZero()) + assert.True(t, nextTime.After(time.Now())) + + _, err = handler.GetRuleNextExecuteTime(cronFHPA, "non-existent-rule") + assert.Error(t, err) + + handler.cronExecutorMap[cronFHPAKey][rule.Name].Scheduler.Stop() +} diff --git a/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job_test.go 
b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job_test.go new file mode 100644 index 000000000000..346e03ae6427 --- /dev/null +++ b/pkg/controllers/cronfederatedhpa/cronfederatedhpa_job_test.go @@ -0,0 +1,241 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cronfederatedhpa + +import ( + "context" + "testing" + "time" + + "github.com/go-co-op/gocron" + "github.com/stretchr/testify/assert" + autoscalingv2 "k8s.io/api/autoscaling/v2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +func TestNewCronFederatedHPAJob(t *testing.T) { + client := fake.NewClientBuilder().Build() + eventRecorder := record.NewFakeRecorder(100) + scheduler := gocron.NewScheduler(time.UTC) + cronFHPA := &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cron-fhpa", + Namespace: "default", + }, + } + rule := autoscalingv1alpha1.CronFederatedHPARule{ + Name: "test-rule", + } + + job := NewCronFederatedHPAJob(client, eventRecorder, scheduler, cronFHPA, rule) + + assert.NotNil(t, job) + assert.Equal(t, client, job.client) + assert.Equal(t, eventRecorder, job.eventRecorder) + assert.Equal(t, scheduler, job.scheduler) + assert.Equal(t, cronFHPA.Name, job.namespaceName.Name) + assert.Equal(t, cronFHPA.Namespace, job.namespaceName.Namespace) + assert.Equal(t, rule, job.rule) +} + +func TestScaleFHPA(t *testing.T) { + tests := []struct { + name string + cronFHPA *autoscalingv1alpha1.CronFederatedHPA + existingFHPA *autoscalingv1alpha1.FederatedHPA + rule autoscalingv1alpha1.CronFederatedHPARule + expectedUpdate bool + expectedErr bool + }{ + { + name: "Update MaxReplicas", + cronFHPA: &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cron-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.CronFederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Name: "test-fhpa", + }, + }, + }, + existingFHPA: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MaxReplicas: 5, + }, + }, + rule: autoscalingv1alpha1.CronFederatedHPARule{ + TargetMaxReplicas: intPtr(10), + }, + expectedUpdate: true, + expectedErr: false, + }, + { + name: "Update MinReplicas", + cronFHPA: &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cron-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.CronFederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Name: "test-fhpa", + }, + }, + }, + existingFHPA: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: intPtr(2), + 
}, + }, + rule: autoscalingv1alpha1.CronFederatedHPARule{ + TargetMinReplicas: intPtr(3), + }, + expectedUpdate: true, + expectedErr: false, + }, + { + name: "No Updates Needed", + cronFHPA: &autoscalingv1alpha1.CronFederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cron-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.CronFederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Name: "test-fhpa", + }, + }, + }, + existingFHPA: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: intPtr(2), + MaxReplicas: 5, + }, + }, + rule: autoscalingv1alpha1.CronFederatedHPARule{ + TargetMinReplicas: intPtr(2), + TargetMaxReplicas: intPtr(5), + }, + expectedUpdate: false, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + _ = autoscalingv1alpha1.Install(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tt.existingFHPA).Build() + + job := &ScalingJob{ + client: client, + rule: tt.rule, + } + + err := job.ScaleFHPA(tt.cronFHPA) + + if tt.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + if tt.expectedUpdate { + updatedFHPA := &autoscalingv1alpha1.FederatedHPA{} + err := client.Get(context.TODO(), types.NamespacedName{Name: tt.existingFHPA.Name, Namespace: tt.existingFHPA.Namespace}, updatedFHPA) + assert.NoError(t, err) + if tt.rule.TargetMaxReplicas != nil { + assert.Equal(t, *tt.rule.TargetMaxReplicas, updatedFHPA.Spec.MaxReplicas) + } + if tt.rule.TargetMinReplicas != nil { + assert.Equal(t, *tt.rule.TargetMinReplicas, *updatedFHPA.Spec.MinReplicas) + } + } + }) + } +} + +func intPtr(i int32) *int32 { + return &i +} + +func TestFindExecutionHistory(t *testing.T) { + tests := []struct { + name string + histories []autoscalingv1alpha1.ExecutionHistory + ruleName string + expectedIndex int + }{ + { + name: "Found", + histories: []autoscalingv1alpha1.ExecutionHistory{ + {RuleName: "rule1"}, + {RuleName: "rule2"}, + {RuleName: "rule3"}, + }, + ruleName: "rule2", + expectedIndex: 1, + }, + { + name: "Not Found", + histories: []autoscalingv1alpha1.ExecutionHistory{ + {RuleName: "rule1"}, + {RuleName: "rule2"}, + }, + ruleName: "rule3", + expectedIndex: -1, + }, + { + name: "Empty History", + histories: []autoscalingv1alpha1.ExecutionHistory{}, + ruleName: "rule1", + expectedIndex: -1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + job := &ScalingJob{ + rule: autoscalingv1alpha1.CronFederatedHPARule{ + Name: tt.ruleName, + }, + } + result := job.findExecutionHistory(tt.histories) + assert.Equal(t, tt.expectedIndex, result) + }) + } +} diff --git a/pkg/controllers/deploymentreplicassyncer/deployment_replicas_syncer_controller.go b/pkg/controllers/deploymentreplicassyncer/deployment_replicas_syncer_controller.go index b38d6937d465..d494da56fbfc 100644 --- a/pkg/controllers/deploymentreplicassyncer/deployment_replicas_syncer_controller.go +++ b/pkg/controllers/deploymentreplicassyncer/deployment_replicas_syncer_controller.go @@ -37,7 +37,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. 
ControllerName = "deployment-replicas-syncer" waitDeploymentStatusInterval = 1 * time.Second diff --git a/pkg/controllers/execution/execution_controller.go b/pkg/controllers/execution/execution_controller.go index 679e8560b890..5d143e72aa74 100644 --- a/pkg/controllers/execution/execution_controller.go +++ b/pkg/controllers/execution/execution_controller.go @@ -30,6 +30,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" @@ -37,7 +38,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/predicate" + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/detector" "github.com/karmada-io/karmada/pkg/events" "github.com/karmada-io/karmada/pkg/metrics" "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" @@ -50,7 +53,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "execution-controller" // WorkSuspendDispatchingConditionMessage is the condition and event message when dispatching is suspended. WorkSuspendDispatchingConditionMessage = "Work dispatching is in a suspended state." @@ -102,15 +105,8 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques } if !work.DeletionTimestamp.IsZero() { - // Abort deleting workload if cluster is unready when unjoining cluster, otherwise the unjoin process will be failed. - if util.IsClusterReady(&cluster.Status) { - err := c.tryDeleteWorkload(ctx, clusterName, work) - if err != nil { - klog.Errorf("Failed to delete work %v, namespace is %v, err is %v", work.Name, work.Namespace, err) - return controllerruntime.Result{}, err - } - } else if cluster.DeletionTimestamp.IsZero() { // cluster is unready, but not terminating - return controllerruntime.Result{}, fmt.Errorf("cluster(%s) not ready", cluster.Name) + if err := c.handleWorkDelete(ctx, work, cluster); err != nil { + return controllerruntime.Result{}, err } return c.removeFinalizer(ctx, work) @@ -137,6 +133,7 @@ func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Reques // SetupWithManager creates a controller and register to controller manager. func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error { return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&workv1alpha1.Work{}, builder.WithPredicates(c.PredicateFunc)). WithEventFilter(predicate.GenerationChangedPredicate{}). 
 		WithOptions(controller.Options{
@@ -151,16 +148,79 @@ func (c *Controller) syncWork(ctx context.Context, clusterName string, work *wor
 	metrics.ObserveSyncWorkloadLatency(err, start)
 	if err != nil {
 		msg := fmt.Sprintf("Failed to sync work(%s/%s) to cluster(%s), err: %v", work.Namespace, work.Name, clusterName, err)
-		klog.Errorf(msg)
+		klog.Error(msg)
 		c.EventRecorder.Event(work, corev1.EventTypeWarning, events.EventReasonSyncWorkloadFailed, msg)
 		return controllerruntime.Result{}, err
 	}
 	msg := fmt.Sprintf("Sync work(%s/%s) to cluster(%s) successful.", work.Namespace, work.Name, clusterName)
-	klog.V(4).Infof(msg)
+	klog.V(4).Info(msg)
 	c.EventRecorder.Event(work, corev1.EventTypeNormal, events.EventReasonSyncWorkloadSucceed, msg)
 	return controllerruntime.Result{}, nil
 }
 
+func (c *Controller) handleWorkDelete(ctx context.Context, work *workv1alpha1.Work, cluster *clusterv1alpha1.Cluster) error {
+	if ptr.Deref(work.Spec.PreserveResourcesOnDeletion, false) {
+		if err := c.cleanupPolicyClaimMetadata(ctx, work, cluster); err != nil {
+			klog.Errorf("Failed to remove annotations and labels on cluster(%s)", cluster.Name)
+			return err
+		}
+		klog.V(4).Infof("Preserving resource on deletion from work(%s/%s) on cluster(%s)", work.Namespace, work.Name, cluster.Name)
+		return nil
+	}
+
+	// Abort deleting workload if cluster is unready when unjoining cluster, otherwise the unjoin process will fail.
+	if util.IsClusterReady(&cluster.Status) {
+		err := c.tryDeleteWorkload(ctx, cluster.Name, work)
+		if err != nil {
+			klog.Errorf("Failed to delete work %v, namespace is %v, err is %v", work.Name, work.Namespace, err)
+			return err
+		}
+	} else if cluster.DeletionTimestamp.IsZero() { // cluster is unready, but not terminating
+		return fmt.Errorf("cluster(%s) not ready", cluster.Name)
+	}
+
+	return nil
+}
+
+func (c *Controller) cleanupPolicyClaimMetadata(ctx context.Context, work *workv1alpha1.Work, cluster *clusterv1alpha1.Cluster) error {
+	for _, manifest := range work.Spec.Workload.Manifests {
+		workload := &unstructured.Unstructured{}
+		if err := workload.UnmarshalJSON(manifest.Raw); err != nil {
+			klog.Errorf("Failed to unmarshal workload from work(%s/%s), error is: %v", work.GetNamespace(), work.GetName(), err)
+			return err
+		}
+
+		fedKey, err := keys.FederatedKeyFunc(cluster.Name, workload)
+		if err != nil {
+			klog.Errorf("Failed to get the federated key resource(kind=%s, %s/%s) from member cluster(%s), err is %v",
+				workload.GetKind(), workload.GetNamespace(), workload.GetName(), cluster.Name, err)
+			return err
+		}
+
+		clusterObj, err := helper.GetObjectFromCache(c.RESTMapper, c.InformerManager, fedKey)
+		if err != nil {
+			klog.Errorf("Failed to get the resource(kind=%s, %s/%s) from member cluster(%s) cache, err is %v",
+				workload.GetKind(), workload.GetNamespace(), workload.GetName(), cluster.Name, err)
+			return err
+		}
+
+		if workload.GetNamespace() == corev1.NamespaceAll {
+			detector.CleanupCPPClaimMetadata(workload)
+		} else {
+			detector.CleanupPPClaimMetadata(workload)
+		}
+		util.RemoveLabels(workload, util.ManagedResourceLabels...)
+		util.RemoveAnnotations(workload, util.ManagedResourceAnnotations...)
+
+		if err := c.ObjectWatcher.Update(ctx, cluster.Name, workload, clusterObj); err != nil {
+			klog.Errorf("Failed to update metadata in the given member cluster %v, err is %v", cluster.Name, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
 // tryDeleteWorkload tries to delete resources in the given member cluster.
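+// Callers are expected to have handled the PreserveResourcesOnDeletion path
+// and checked cluster readiness before delegating here (see handleWorkDelete
+// above).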
func (c *Controller) tryDeleteWorkload(ctx context.Context, clusterName string, work *workv1alpha1.Work) error { for _, manifest := range work.Spec.Workload.Manifests { diff --git a/pkg/controllers/execution/execution_controller_test.go b/pkg/controllers/execution/execution_controller_test.go index 433d52d20aae..ce53d860f54e 100644 --- a/pkg/controllers/execution/execution_controller_test.go +++ b/pkg/controllers/execution/execution_controller_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -33,11 +34,14 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" "github.com/karmada-io/karmada/pkg/events" + "github.com/karmada-io/karmada/pkg/resourceinterpreter" + "github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager" "github.com/karmada-io/karmada/pkg/util/gclient" @@ -45,6 +49,18 @@ import ( testhelper "github.com/karmada-io/karmada/test/helper" ) +type FakeResourceInterpreter struct { + *native.DefaultInterpreter +} + +var _ resourceinterpreter.ResourceInterpreter = &FakeResourceInterpreter{} + +const ( + podNamespace = "default" + podName = "test" + clusterName = "cluster" +) + func TestExecutionController_Reconcile(t *testing.T) { tests := []struct { name string @@ -54,6 +70,7 @@ func TestExecutionController_Reconcile(t *testing.T) { expectCondition *metav1.Condition expectEventMessage string existErr bool + resourceExists *bool }{ { name: "work dispatching is suspended, no error, no apply", @@ -112,10 +129,52 @@ func TestExecutionController_Reconcile(t *testing.T) { work.Spec.SuspendDispatching = ptr.To(true) }), }, + { + name: "PreserveResourcesOnDeletion=true, deletion timestamp set, does not delete resource", + ns: "karmada-es-cluster", + expectRes: controllerruntime.Result{}, + existErr: false, + resourceExists: ptr.To(true), + work: newWork(func(work *workv1alpha1.Work) { + now := metav1.Now() + work.SetDeletionTimestamp(&now) + work.SetFinalizers([]string{util.ExecutionControllerFinalizer}) + work.Spec.PreserveResourcesOnDeletion = ptr.To(true) + }), + }, + { + name: "PreserveResourcesOnDeletion=false, deletion timestamp set, deletes resource", + ns: "karmada-es-cluster", + expectRes: controllerruntime.Result{}, + existErr: false, + resourceExists: ptr.To(false), + work: newWork(func(work *workv1alpha1.Work) { + now := metav1.Now() + work.SetDeletionTimestamp(&now) + work.SetFinalizers([]string{util.ExecutionControllerFinalizer}) + work.Spec.PreserveResourcesOnDeletion = ptr.To(false) + }), + }, + { + name: "PreserveResourcesOnDeletion unset, deletion timestamp set, deletes resource", + ns: "karmada-es-cluster", + expectRes: controllerruntime.Result{}, + existErr: false, + resourceExists: ptr.To(false), + work: newWork(func(work *workv1alpha1.Work) { + now := metav1.Now() + work.SetDeletionTimestamp(&now) + work.SetFinalizers([]string{util.ExecutionControllerFinalizer}) + }), + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + 
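+			// genericmanager.GetInstance() is a shared singleton (newController
+			// registers the fake cluster with it); stop the cluster's informers
+			// after each case so informer state does not leak into the next subtest.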
t.Cleanup(func() { + genericmanager.GetInstance().Stop(clusterName) + }) + req := controllerruntime.Request{ NamespacedName: types.NamespacedName{ Name: "work", @@ -143,32 +202,51 @@ func TestExecutionController_Reconcile(t *testing.T) { e := <-eventRecorder.Events assert.Equal(t, tt.expectEventMessage, e) } + + if tt.resourceExists != nil { + resourceInterface := c.InformerManager.GetSingleClusterManager(clusterName).GetClient(). + Resource(corev1.SchemeGroupVersion.WithResource("pods")).Namespace(podNamespace) + _, err = resourceInterface.Get(context.TODO(), podName, metav1.GetOptions{}) + if *tt.resourceExists { + assert.NoErrorf(t, err, "unable to query pod (%s/%s)", podNamespace, podName) + } else { + assert.True(t, apierrors.IsNotFound(err), "pod (%s/%s) was not deleted", podNamespace, podName) + } + } }) } } -func newController(work *workv1alpha1.Work, eventRecorder *record.FakeRecorder) Controller { - cluster := newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue) - pod := testhelper.NewPod("default", "test") - client := fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster, work, pod).WithStatusSubresource(work).Build() +func newController(work *workv1alpha1.Work, recorder *record.FakeRecorder) Controller { + cluster := newCluster(clusterName, clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue) + pod := testhelper.NewPod(podNamespace, podName) + pod.SetLabels(map[string]string{util.ManagedByKarmadaLabel: util.ManagedByKarmadaLabelValue}) restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) restMapper.Add(corev1.SchemeGroupVersion.WithKind(pod.Kind), meta.RESTScopeNamespace) + fakeClient := fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster, work).WithStatusSubresource(work).WithRESTMapper(restMapper).Build() dynamicClientSet := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, pod) informerManager := genericmanager.GetInstance() informerManager.ForCluster(cluster.Name, dynamicClientSet, 0).Lister(corev1.SchemeGroupVersion.WithResource("pods")) informerManager.Start(cluster.Name) informerManager.WaitForCacheSync(cluster.Name) + clusterClientSetFunc := func(string, client.Client) (*util.DynamicClusterClient, error) { + return &util.DynamicClusterClient{ + ClusterName: clusterName, + DynamicClientSet: dynamicClientSet, + }, nil + } + resourceInterpreter := FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()} return Controller{ - Client: client, + Client: fakeClient, InformerManager: informerManager, - EventRecorder: eventRecorder, + EventRecorder: recorder, RESTMapper: restMapper, - ObjectWatcher: objectwatcher.NewObjectWatcher(client, restMapper, util.NewClusterDynamicClientSetForAgent, nil), + ObjectWatcher: objectwatcher.NewObjectWatcher(fakeClient, restMapper, clusterClientSetFunc, resourceInterpreter), } } func newWork(applyFunc func(work *workv1alpha1.Work)) *workv1alpha1.Work { - pod := testhelper.NewPod("default", "test") + pod := testhelper.NewPod(podNamespace, podName) bytes, _ := json.Marshal(pod) work := testhelper.NewWork("work", "karmada-es-cluster", string(uuid.NewUUID()), bytes) if applyFunc != nil { @@ -193,3 +271,7 @@ func newCluster(name string, clusterType string, clusterStatus metav1.ConditionS }, } } + +func (f FakeResourceInterpreter) Start(context.Context) error { + return nil +} diff --git a/pkg/controllers/federatedhpa/config/types_test.go b/pkg/controllers/federatedhpa/config/types_test.go new file mode 100644 
index 000000000000..d9af9317b949 --- /dev/null +++ b/pkg/controllers/federatedhpa/config/types_test.go @@ -0,0 +1,59 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "testing" + + "github.com/spf13/pflag" +) + +// TestHPAControllerConfiguration_AddFlags tests that AddFlags adds all expected flags +func TestHPAControllerConfiguration_AddFlags(t *testing.T) { + config := &HPAControllerConfiguration{} + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + config.AddFlags(fs) + + expectedFlags := []string{ + "horizontal-pod-autoscaler-sync-period", + "horizontal-pod-autoscaler-upscale-delay", + "horizontal-pod-autoscaler-downscale-stabilization", + "horizontal-pod-autoscaler-downscale-delay", + "horizontal-pod-autoscaler-tolerance", + "horizontal-pod-autoscaler-cpu-initialization-period", + "horizontal-pod-autoscaler-initial-readiness-delay", + } + + for _, flagName := range expectedFlags { + if fs.Lookup(flagName) == nil { + t.Errorf("Expected flag %s not found", flagName) + } + } +} + +// TestHPAControllerConfiguration_AddFlags_NilReceiver tests AddFlags with a nil receiver +func TestHPAControllerConfiguration_AddFlags_NilReceiver(t *testing.T) { + var config *HPAControllerConfiguration + fs := pflag.NewFlagSet("test", pflag.ContinueOnError) + + config.AddFlags(fs) + + if fs.HasFlags() { + t.Error("Expected no flags to be added when receiver is nil, but flags were added") + } +} diff --git a/pkg/controllers/federatedhpa/federatedhpa_controller.go b/pkg/controllers/federatedhpa/federatedhpa_controller.go index 0134f3351462..600ad9c63d51 100644 --- a/pkg/controllers/federatedhpa/federatedhpa_controller.go +++ b/pkg/controllers/federatedhpa/federatedhpa_controller.go @@ -62,7 +62,7 @@ import ( // FederatedHPA-controller is borrowed from the HPA controller of Kubernetes. // The referenced code has been marked in the comment. -// ControllerName is the controller name that will be used when reporting events. +// ControllerName is the controller name that will be used when reporting events and metrics. const ControllerName = "federatedHPA-controller" var ( @@ -128,6 +128,7 @@ func (c *FHPAController) SetupWithManager(mgr controllerruntime.Manager) error { c.hpaSelectors = selectors.NewBiMultimap() c.monitor = monitor.New() return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&autoscalingv1alpha1.FederatedHPA{}). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). WithEventFilter(predicate.GenerationChangedPredicate{}). diff --git a/pkg/controllers/federatedhpa/federatedhpa_controller_test.go b/pkg/controllers/federatedhpa/federatedhpa_controller_test.go new file mode 100644 index 000000000000..f23c8818146b --- /dev/null +++ b/pkg/controllers/federatedhpa/federatedhpa_controller_test.go @@ -0,0 +1,1704 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedhpa + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util/lifted/selectors" +) + +type MockClient struct { + mock.Mock +} + +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} + +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + 
return args.Bool(0), args.Error(1) +} + +// TestGetBindingByLabel verifies the behavior of getBindingByLabel function +func TestGetBindingByLabel(t *testing.T) { + tests := []struct { + name string + resourceLabel map[string]string + resourceRef autoscalingv2.CrossVersionObjectReference + bindingList *workv1alpha2.ResourceBindingList + expectedError string + }{ + { + name: "Successful retrieval", + resourceLabel: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-policy-id", + }, + resourceRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + APIVersion: "apps/v1", + }, + bindingList: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + }, + }, + }, + }, + }, + { + name: "Empty resource label", + resourceLabel: map[string]string{}, + expectedError: "target resource has no label", + }, + { + name: "No matching bindings", + resourceLabel: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-policy-id", + }, + resourceRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "non-existent-deployment", + APIVersion: "apps/v1", + }, + bindingList: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{}, + }, + expectedError: "length of binding list is zero", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + controller := &FHPAController{ + Client: mockClient, + } + + ctx := context.Background() + + if tt.bindingList != nil { + mockClient.On("List", ctx, mock.AnythingOfType("*v1alpha2.ResourceBindingList"), mock.Anything). + Return(nil). 
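+					// Run copies the prepared binding list into the typed list
+					// object that getBindingByLabel passes to client.List.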
+ Run(func(args mock.Arguments) { + arg := args.Get(1).(*workv1alpha2.ResourceBindingList) + *arg = *tt.bindingList + }) + } + + binding, err := controller.getBindingByLabel(ctx, tt.resourceLabel, tt.resourceRef) + + if tt.expectedError != "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.NotNil(t, binding) + assert.Equal(t, tt.resourceRef.Name, binding.Spec.Resource.Name) + } + + mockClient.AssertExpectations(t) + }) + } +} + +// TestGetTargetCluster checks the getTargetCluster function's handling of various cluster states +func TestGetTargetCluster(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + clusters map[string]*clusterv1alpha1.Cluster + getErrors map[string]error + expectedClusters []string + expectedError string + }{ + { + name: "Two clusters, one ready and one not ready", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + }, + }, + }, + clusters: map[string]*clusterv1alpha1.Cluster{ + "cluster1": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster2": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + }, + expectedClusters: []string{"cluster1"}, + }, + { + name: "Empty binding.Spec.Clusters", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{}, + }, + }, + expectedError: "binding has no schedulable clusters", + }, + { + name: "Client.Get returns error", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + }, + }, + getErrors: map[string]error{ + "cluster1": errors.New("get error"), + }, + expectedError: "get error", + }, + { + name: "Multiple ready and not ready clusters", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + {Name: "cluster3"}, + {Name: "cluster4"}, + {Name: "cluster5"}, + }, + }, + }, + clusters: map[string]*clusterv1alpha1.Cluster{ + "cluster1": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster2": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + "cluster3": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + "cluster4": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + "cluster5": { + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedClusters: []string{"cluster1", "cluster3", "cluster5"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := 
new(MockClient) + controller := &FHPAController{ + Client: mockClient, + } + + ctx := context.Background() + + for _, targetCluster := range tt.binding.Spec.Clusters { + var err error + if tt.getErrors != nil { + err = tt.getErrors[targetCluster.Name] + } + + mockClient.On("Get", ctx, types.NamespacedName{Name: targetCluster.Name}, mock.AnythingOfType("*v1alpha1.Cluster"), mock.Anything). + Return(err). + Run(func(args mock.Arguments) { + if tt.clusters != nil { + arg := args.Get(2).(*clusterv1alpha1.Cluster) + *arg = *tt.clusters[targetCluster.Name] + } + }) + } + + clusters, err := controller.getTargetCluster(ctx, tt.binding) + + if tt.expectedError != "" { + assert.EqualError(t, err, tt.expectedError) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedClusters, clusters) + } + + mockClient.AssertExpectations(t) + + for _, targetCluster := range tt.binding.Spec.Clusters { + mockClient.AssertCalled(t, "Get", ctx, types.NamespacedName{Name: targetCluster.Name}, mock.AnythingOfType("*v1alpha1.Cluster"), mock.Anything) + } + }) + } +} + +// TestValidateAndParseSelector ensures proper parsing and validation of selectors +func TestValidateAndParseSelector(t *testing.T) { + tests := []struct { + name string + selector string + expectedError bool + }{ + { + name: "Valid selector", + selector: "app=myapp", + expectedError: false, + }, + { + name: "Invalid selector", + selector: "invalid=selector=format", + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + hpaSelectors: selectors.NewBiMultimap(), + hpaSelectorsMux: sync.Mutex{}, + EventRecorder: &record.FakeRecorder{}, + } + + hpa := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-hpa", + Namespace: "default", + }, + } + + parsedSelector, err := controller.validateAndParseSelector(hpa, tt.selector, []*corev1.Pod{}) + + if tt.expectedError { + assert.Error(t, err) + assert.Nil(t, parsedSelector) + assert.Contains(t, err.Error(), "couldn't convert selector into a corresponding internal selector object") + } else { + assert.NoError(t, err) + assert.NotNil(t, parsedSelector) + } + }) + } +} + +// TestRecordInitialRecommendation verifies correct recording of initial recommendations +func TestRecordInitialRecommendation(t *testing.T) { + tests := []struct { + name string + key string + currentReplicas int32 + initialRecs []timestampedRecommendation + expectedCount int + expectedReplicas int32 + }{ + { + name: "New recommendation", + key: "test-hpa-1", + currentReplicas: 3, + initialRecs: nil, + expectedCount: 1, + expectedReplicas: 3, + }, + { + name: "Existing recommendations", + key: "test-hpa-2", + currentReplicas: 5, + initialRecs: []timestampedRecommendation{ + {recommendation: 3, timestamp: time.Now().Add(-1 * time.Minute)}, + }, + expectedCount: 1, + expectedReplicas: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + } + + if tt.initialRecs != nil { + controller.recommendations[tt.key] = tt.initialRecs + } + + controller.recordInitialRecommendation(tt.currentReplicas, tt.key) + + assert.Len(t, controller.recommendations[tt.key], tt.expectedCount) + assert.Equal(t, tt.expectedReplicas, controller.recommendations[tt.key][0].recommendation) + + if tt.initialRecs == nil { + assert.WithinDuration(t, time.Now(), controller.recommendations[tt.key][0].timestamp, 2*time.Second) + } else { + 
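+				// Pre-existing recommendations must be left untouched, original
+				// timestamps included.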
assert.Equal(t, tt.initialRecs[0].timestamp, controller.recommendations[tt.key][0].timestamp) + } + }) + } +} + +// TestStabilizeRecommendation checks the stabilization logic for recommendations +func TestStabilizeRecommendation(t *testing.T) { + tests := []struct { + name string + key string + initialRecommendations []timestampedRecommendation + newRecommendation int32 + expectedStabilized int32 + expectedStoredCount int + }{ + { + name: "No previous recommendations", + key: "test-hpa-1", + initialRecommendations: []timestampedRecommendation{}, + newRecommendation: 5, + expectedStabilized: 5, + expectedStoredCount: 1, + }, + { + name: "With previous recommendations within window", + key: "test-hpa-2", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 3, timestamp: time.Now().Add(-30 * time.Second)}, + {recommendation: 4, timestamp: time.Now().Add(-45 * time.Second)}, + }, + newRecommendation: 2, + expectedStabilized: 4, + expectedStoredCount: 3, + }, + { + name: "With old recommendation outside window", + key: "test-hpa-3", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 7, timestamp: time.Now().Add(-2 * time.Minute)}, + {recommendation: 4, timestamp: time.Now().Add(-45 * time.Second)}, + }, + newRecommendation: 5, + expectedStabilized: 5, + expectedStoredCount: 2, + }, + { + name: "All recommendations outside window", + key: "test-hpa-4", + initialRecommendations: []timestampedRecommendation{ + {recommendation: 7, timestamp: time.Now().Add(-2 * time.Minute)}, + {recommendation: 8, timestamp: time.Now().Add(-3 * time.Minute)}, + }, + newRecommendation: 3, + expectedStabilized: 3, + expectedStoredCount: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: time.Minute, + } + controller.recommendations[tt.key] = tt.initialRecommendations + + stabilized := controller.stabilizeRecommendation(tt.key, tt.newRecommendation) + + assert.Equal(t, tt.expectedStabilized, stabilized, "Unexpected stabilized recommendation") + assert.Len(t, controller.recommendations[tt.key], tt.expectedStoredCount, "Unexpected number of stored recommendations") + assert.True(t, containsRecommendation(controller.recommendations[tt.key], tt.newRecommendation), "New recommendation not found in stored recommendations") + + oldCount := countOldRecommendations(controller.recommendations[tt.key], controller.DownscaleStabilisationWindow) + assert.LessOrEqual(t, oldCount, 1, "Too many recommendations older than stabilization window") + }) + } +} + +// TestNormalizeDesiredReplicas verifies the normalization of desired replicas +func TestNormalizeDesiredReplicas(t *testing.T) { + testCases := []struct { + name string + currentReplicas int32 + desiredReplicas int32 + minReplicas int32 + maxReplicas int32 + recommendations []timestampedRecommendation + expectedReplicas int32 + expectedAbleToScale autoscalingv2.HorizontalPodAutoscalerConditionType + expectedAbleToScaleReason string + expectedScalingLimited corev1.ConditionStatus + expectedScalingLimitedReason string + }{ + { + name: "scale up within limits", + currentReplicas: 2, + desiredReplicas: 4, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{4, time.Now()}}, + expectedReplicas: 4, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionFalse, + 
expectedScalingLimitedReason: "DesiredWithinRange", + }, + { + name: "scale down stabilized", + currentReplicas: 5, + desiredReplicas: 3, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{4, time.Now().Add(-1 * time.Minute)}, {3, time.Now()}}, + expectedReplicas: 4, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ScaleDownStabilized", + expectedScalingLimited: corev1.ConditionFalse, + expectedScalingLimitedReason: "DesiredWithinRange", + }, + { + name: "at min replicas", + currentReplicas: 2, + desiredReplicas: 1, + minReplicas: 2, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{1, time.Now()}}, + expectedReplicas: 2, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionTrue, + expectedScalingLimitedReason: "TooFewReplicas", + }, + { + name: "at max replicas", + currentReplicas: 10, + desiredReplicas: 12, + minReplicas: 1, + maxReplicas: 10, + recommendations: []timestampedRecommendation{{12, time.Now()}}, + expectedReplicas: 10, + expectedAbleToScale: autoscalingv2.AbleToScale, + expectedAbleToScaleReason: "ReadyForNewScale", + expectedScalingLimited: corev1.ConditionTrue, + expectedScalingLimitedReason: "TooManyReplicas", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: 5 * time.Minute, + } + controller.recommendations["test-hpa"] = tc.recommendations + + hpa := &autoscalingv1alpha1.FederatedHPA{ + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: &tc.minReplicas, + MaxReplicas: tc.maxReplicas, + }, + } + + normalized := controller.normalizeDesiredReplicas(hpa, "test-hpa", tc.currentReplicas, tc.desiredReplicas, tc.minReplicas) + + assert.Equal(t, tc.expectedReplicas, normalized, "Unexpected normalized replicas") + + ableToScaleCondition := getCondition(hpa.Status.Conditions, autoscalingv2.AbleToScale) + assert.NotNil(t, ableToScaleCondition, "AbleToScale condition not found") + assert.Equal(t, corev1.ConditionTrue, ableToScaleCondition.Status, "Unexpected AbleToScale condition status") + assert.Equal(t, tc.expectedAbleToScaleReason, ableToScaleCondition.Reason, "Unexpected AbleToScale condition reason") + + scalingLimitedCondition := getCondition(hpa.Status.Conditions, autoscalingv2.ScalingLimited) + assert.NotNil(t, scalingLimitedCondition, "ScalingLimited condition not found") + assert.Equal(t, tc.expectedScalingLimited, scalingLimitedCondition.Status, "Unexpected ScalingLimited condition status") + assert.Equal(t, tc.expectedScalingLimitedReason, scalingLimitedCondition.Reason, "Unexpected ScalingLimited condition reason") + }) + } +} + +// TestNormalizeDesiredReplicasWithBehaviors checks replica normalization with scaling behaviors +func TestNormalizeDesiredReplicasWithBehaviors(t *testing.T) { + defaultStabilizationWindowSeconds := int32(300) + defaultSelectPolicy := autoscalingv2.MaxChangePolicySelect + + tests := []struct { + name string + hpa *autoscalingv1alpha1.FederatedHPA + key string + currentReplicas int32 + prenormalizedReplicas int32 + expectedReplicas int32 + }{ + { + name: "Scale up with behavior", + hpa: createTestHPA(1, 10, &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: 
autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}}), + ScaleDown: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60}}), + }), + key: "test-hpa", + currentReplicas: 5, + prenormalizedReplicas: 15, + expectedReplicas: 10, + }, + { + name: "Scale down with behavior", + hpa: createTestHPA(1, 10, &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}}), + ScaleDown: createTestScalingRules(&defaultStabilizationWindowSeconds, &defaultSelectPolicy, []autoscalingv2.HPAScalingPolicy{{Type: autoscalingv2.PercentScalingPolicy, Value: 50, PeriodSeconds: 60}}), + }), + key: "test-hpa", + currentReplicas: 8, + prenormalizedReplicas: 2, + expectedReplicas: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + DownscaleStabilisationWindow: 5 * time.Minute, + } + + normalized := controller.normalizeDesiredReplicasWithBehaviors(tt.hpa, tt.key, tt.currentReplicas, tt.prenormalizedReplicas, *tt.hpa.Spec.MinReplicas) + assert.Equal(t, tt.expectedReplicas, normalized, "Unexpected normalized replicas") + }) + } +} + +// TestGetReplicasChangePerPeriod ensures correct calculation of replica changes over time +func TestGetReplicasChangePerPeriod(t *testing.T) { + now := time.Now() + tests := []struct { + name string + periodSeconds int32 + scaleEvents []timestampedScaleEvent + expectedChange int32 + }{ + { + name: "No events", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{}, + expectedChange: 0, + }, + { + name: "Single event within period", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{ + {replicaChange: 3, timestamp: now.Add(-30 * time.Second)}, + }, + expectedChange: 3, + }, + { + name: "Multiple events, some outside period", + periodSeconds: 60, + scaleEvents: []timestampedScaleEvent{ + {replicaChange: 3, timestamp: now.Add(-30 * time.Second)}, + {replicaChange: 2, timestamp: now.Add(-45 * time.Second)}, + {replicaChange: 1, timestamp: now.Add(-70 * time.Second)}, + }, + expectedChange: 5, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + change := getReplicasChangePerPeriod(tt.periodSeconds, tt.scaleEvents) + assert.Equal(t, tt.expectedChange, change, "Unexpected change in replicas") + }) + } +} + +// TestGetUnableComputeReplicaCountCondition verifies condition creation for compute failures +func TestGetUnableComputeReplicaCountCondition(t *testing.T) { + tests := []struct { + name string + object runtime.Object + reason string + err error + expectedEvent string + expectedMessage string + }{ + { + name: "FederatedHPA with simple error", + object: createTestFederatedHPA("test-hpa", "default"), + reason: "TestReason", + err: fmt.Errorf("test error"), + expectedEvent: "Warning TestReason test error", + expectedMessage: "the HPA was unable to compute the replica count: test error", + }, + { + name: "Different object type", + object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-pod", Namespace: "default"}}, + reason: "PodError", + err: fmt.Errorf("pod error"), + expectedEvent: "Warning PodError pod error", + expectedMessage: "the HPA was unable to compute the replica count: pod error", 
+ }, + { + name: "Complex error message", + object: createTestFederatedHPA("complex-hpa", "default"), + reason: "ComplexError", + err: fmt.Errorf("error: %v", fmt.Errorf("nested error")), + expectedEvent: "Warning ComplexError error: nested error", + expectedMessage: "the HPA was unable to compute the replica count: error: nested error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + controller := &FHPAController{ + EventRecorder: fakeRecorder, + } + + condition := controller.getUnableComputeReplicaCountCondition(tt.object, tt.reason, tt.err) + + assert.Equal(t, autoscalingv2.ScalingActive, condition.Type, "Unexpected condition type") + assert.Equal(t, corev1.ConditionFalse, condition.Status, "Unexpected condition status") + assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason") + assert.Equal(t, tt.expectedMessage, condition.Message, "Unexpected condition message") + + select { + case event := <-fakeRecorder.Events: + assert.Equal(t, tt.expectedEvent, event, "Unexpected event recorded") + case <-time.After(time.Second): + t.Error("Expected an event to be recorded, but none was") + } + }) + } +} + +// TestStoreScaleEvent checks proper storage of scaling events +func TestStoreScaleEvent(t *testing.T) { + tests := []struct { + name string + behavior *autoscalingv2.HorizontalPodAutoscalerBehavior + key string + prevReplicas int32 + newReplicas int32 + expectedUp int + expectedDown int + }{ + { + name: "Scale up event", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](int32(60)), + }, + }, + key: "test-hpa", + prevReplicas: 5, + newReplicas: 10, + expectedUp: 1, + expectedDown: 0, + }, + { + name: "Scale down event", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](int32(60)), + }, + }, + key: "test-hpa", + prevReplicas: 10, + newReplicas: 5, + expectedUp: 0, + expectedDown: 1, + }, + { + name: "Nil behavior", + behavior: nil, + key: "test-hpa", + prevReplicas: 5, + newReplicas: 5, + expectedUp: 0, + expectedDown: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + scaleUpEvents: make(map[string][]timestampedScaleEvent), + scaleDownEvents: make(map[string][]timestampedScaleEvent), + } + + controller.storeScaleEvent(tt.behavior, tt.key, tt.prevReplicas, tt.newReplicas) + + assert.Len(t, controller.scaleUpEvents[tt.key], tt.expectedUp, "Unexpected number of scale up events") + assert.Len(t, controller.scaleDownEvents[tt.key], tt.expectedDown, "Unexpected number of scale down events") + }) + } +} + +// TestStabilizeRecommendationWithBehaviors verifies recommendation stabilization with behaviors +func TestStabilizeRecommendationWithBehaviors(t *testing.T) { + now := time.Now() + upWindow := int32(300) // 5 minutes + downWindow := int32(600) // 10 minutes + + tests := []struct { + name string + args NormalizationArg + initialRecommendations []timestampedRecommendation + expectedReplicas int32 + expectedReason string + expectedMessage string + }{ + { + name: "Scale up stabilized", + args: NormalizationArg{ + Key: "test-hpa-1", + DesiredReplicas: 10, + CurrentReplicas: 5, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + 
StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 8, timestamp: now.Add(-2 * time.Minute)}, + {recommendation: 7, timestamp: now.Add(-4 * time.Minute)}, + }, + expectedReplicas: 7, + expectedReason: "ScaleUpStabilized", + expectedMessage: "recent recommendations were lower than current one, applying the lowest recent recommendation", + }, + { + name: "Scale down stabilized", + args: NormalizationArg{ + Key: "test-hpa-2", + DesiredReplicas: 3, + CurrentReplicas: 8, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 5, timestamp: now.Add(-5 * time.Minute)}, + {recommendation: 4, timestamp: now.Add(-8 * time.Minute)}, + }, + expectedReplicas: 5, + expectedReason: "ScaleDownStabilized", + expectedMessage: "recent recommendations were higher than current one, applying the highest recent recommendation", + }, + { + name: "No change needed", + args: NormalizationArg{ + Key: "test-hpa-3", + DesiredReplicas: 5, + CurrentReplicas: 5, + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &upWindow, + }, + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: &downWindow, + }, + }, + initialRecommendations: []timestampedRecommendation{ + {recommendation: 5, timestamp: now.Add(-1 * time.Minute)}, + }, + expectedReplicas: 5, + expectedReason: "ScaleUpStabilized", + expectedMessage: "recent recommendations were lower than current one, applying the lowest recent recommendation", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + recommendations: make(map[string][]timestampedRecommendation), + } + + controller.recommendations[tt.args.Key] = tt.initialRecommendations + + gotReplicas, gotReason, gotMessage := controller.stabilizeRecommendationWithBehaviors(tt.args) + + assert.Equal(t, tt.expectedReplicas, gotReplicas, "Unexpected stabilized replicas") + assert.Equal(t, tt.expectedReason, gotReason, "Unexpected stabilization reason") + assert.Equal(t, tt.expectedMessage, gotMessage, "Unexpected stabilization message") + + storedRecommendations := controller.recommendations[tt.args.Key] + assert.True(t, containsRecommendation(storedRecommendations, tt.args.DesiredReplicas), "New recommendation not found in stored recommendations") + assert.Len(t, storedRecommendations, len(tt.initialRecommendations)+1, "Unexpected number of stored recommendations") + }) + } +} + +// TestConvertDesiredReplicasWithBehaviorRate verifies replica conversion with behavior rates +func TestConvertDesiredReplicasWithBehaviorRate(t *testing.T) { + tests := []struct { + name string + args NormalizationArg + scaleUpEvents []timestampedScaleEvent + scaleDownEvents []timestampedScaleEvent + expectedReplicas int32 + expectedReason string + expectedMessage string + }{ + { + name: "Scale up within limits", + args: NormalizationArg{ + Key: "test-hpa", + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 5, + DesiredReplicas: 8, + }, + expectedReplicas: 8, + expectedReason: "DesiredWithinRange", + expectedMessage: "the desired count is within the 
acceptable range", + }, + { + name: "Scale down within limits", + args: NormalizationArg{ + Key: "test-hpa", + ScaleDownBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 100, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 5, + DesiredReplicas: 3, + }, + expectedReplicas: 3, + expectedReason: "DesiredWithinRange", + expectedMessage: "the desired count is within the acceptable range", + }, + { + name: "Scale up beyond MaxReplicas", + args: NormalizationArg{ + Key: "test-hpa", + ScaleUpBehavior: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 200, PeriodSeconds: 60}, + }, + }, + MinReplicas: 1, + MaxReplicas: 10, + CurrentReplicas: 8, + DesiredReplicas: 12, + }, + expectedReplicas: 10, + expectedReason: "TooManyReplicas", + expectedMessage: "the desired replica count is more than the maximum replica count", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := &FHPAController{ + scaleUpEvents: make(map[string][]timestampedScaleEvent), + scaleDownEvents: make(map[string][]timestampedScaleEvent), + } + controller.scaleUpEvents[tt.args.Key] = tt.scaleUpEvents + controller.scaleDownEvents[tt.args.Key] = tt.scaleDownEvents + + replicas, reason, message := controller.convertDesiredReplicasWithBehaviorRate(tt.args) + + assert.Equal(t, tt.expectedReplicas, replicas, "Unexpected number of replicas") + assert.Equal(t, tt.expectedReason, reason, "Unexpected reason") + assert.Equal(t, tt.expectedMessage, message, "Unexpected message") + }) + } +} + +// TestConvertDesiredReplicasWithRules checks replica conversion using basic rules +func TestConvertDesiredReplicasWithRules(t *testing.T) { + tests := []struct { + name string + currentReplicas int32 + desiredReplicas int32 + hpaMinReplicas int32 + hpaMaxReplicas int32 + expectedReplicas int32 + expectedCondition string + expectedReason string + }{ + { + name: "Desired within range", + currentReplicas: 5, + desiredReplicas: 7, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 7, + expectedCondition: "DesiredWithinRange", + expectedReason: "the desired count is within the acceptable range", + }, + { + name: "Desired below min", + currentReplicas: 5, + desiredReplicas: 2, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 3, + expectedCondition: "TooFewReplicas", + expectedReason: "the desired replica count is less than the minimum replica count", + }, + { + name: "Desired above max", + currentReplicas: 5, + desiredReplicas: 15, + hpaMinReplicas: 3, + hpaMaxReplicas: 10, + expectedReplicas: 10, + expectedCondition: "TooManyReplicas", + expectedReason: "the desired replica count is more than the maximum replica count", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + replicas, condition, reason := convertDesiredReplicasWithRules(tt.currentReplicas, tt.desiredReplicas, tt.hpaMinReplicas, tt.hpaMaxReplicas) + assert.Equal(t, tt.expectedReplicas, replicas, "Unexpected number of replicas") + assert.Equal(t, tt.expectedCondition, condition, "Unexpected condition") + assert.Equal(t, tt.expectedReason, reason, "Unexpected reason") + }) + } +} + +// TestCalculateScaleUpLimitWithScalingRules verifies scale-up limit calculation with rules +func TestCalculateScaleUpLimit(t *testing.T) { + tests := []struct { + name string + currentReplicas int32 + expectedLimit int32 + 
}{ + { + name: "Small scale up", + currentReplicas: 1, + expectedLimit: 4, + }, + { + name: "Medium scale up", + currentReplicas: 10, + expectedLimit: 20, + }, + { + name: "Large scale up", + currentReplicas: 100, + expectedLimit: 200, + }, + { + name: "Zero replicas", + currentReplicas: 0, + expectedLimit: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limit := calculateScaleUpLimit(tt.currentReplicas) + assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale up limit") + }) + } +} + +// TestMarkScaleEventsOutdated ensures proper marking of outdated scale events +func TestMarkScaleEventsOutdated(t *testing.T) { + now := time.Now() + tests := []struct { + name string + scaleEvents []timestampedScaleEvent + longestPolicyPeriod int32 + expectedOutdated []bool + }{ + { + name: "All events within period", + scaleEvents: []timestampedScaleEvent{ + {timestamp: now.Add(-30 * time.Second)}, + {timestamp: now.Add(-60 * time.Second)}, + }, + longestPolicyPeriod: 120, + expectedOutdated: []bool{false, false}, + }, + { + name: "Some events outdated", + scaleEvents: []timestampedScaleEvent{ + {timestamp: now.Add(-30 * time.Second)}, + {timestamp: now.Add(-90 * time.Second)}, + {timestamp: now.Add(-150 * time.Second)}, + }, + longestPolicyPeriod: 120, + expectedOutdated: []bool{false, false, true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + markScaleEventsOutdated(tt.scaleEvents, tt.longestPolicyPeriod) + for i, event := range tt.scaleEvents { + assert.Equal(t, tt.expectedOutdated[i], event.outdated, "Unexpected outdated status for event %d", i) + } + }) + } +} + +// TestGetLongestPolicyPeriod checks retrieval of the longest policy period +func TestGetLongestPolicyPeriod(t *testing.T) { + tests := []struct { + name string + scalingRules *autoscalingv2.HPAScalingRules + expectedPeriod int32 + }{ + { + name: "Single policy", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {PeriodSeconds: 60}, + }, + }, + expectedPeriod: 60, + }, + { + name: "Multiple policies", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {PeriodSeconds: 60}, + {PeriodSeconds: 120}, + {PeriodSeconds: 30}, + }, + }, + expectedPeriod: 120, + }, + { + name: "No policies", + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{}, + }, + expectedPeriod: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + period := getLongestPolicyPeriod(tt.scalingRules) + assert.Equal(t, tt.expectedPeriod, period, "Unexpected longest policy period") + }) + } +} + +// TestCalculateScaleUpLimitWithScalingRules verifies scale-up limit calculation with rules +func TestCalculateScaleUpLimitWithScalingRules(t *testing.T) { + baseTime := time.Now() + disabledPolicy := autoscalingv2.DisabledPolicySelect + minChangePolicy := autoscalingv2.MinChangePolicySelect + + tests := []struct { + name string + currentReplicas int32 + scaleUpEvents []timestampedScaleEvent + scaleDownEvents []timestampedScaleEvent + scalingRules *autoscalingv2.HPAScalingRules + expectedLimit int32 + }{ + { + name: "No previous events", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 9, + }, + { + name: 
"With previous scale up event", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{ + {replicaChange: 2, timestamp: baseTime.Add(-30 * time.Second)}, + }, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 7, + }, + { + name: "Disabled policy", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &disabledPolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + }, + }, + expectedLimit: 5, + }, + { + name: "MinChange policy", + currentReplicas: 5, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: &minChangePolicy, + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PodsScalingPolicy, Value: 4, PeriodSeconds: 60}, + {Type: autoscalingv2.PodsScalingPolicy, Value: 2, PeriodSeconds: 60}, + }, + }, + expectedLimit: 7, + }, + { + name: "Percent scaling policy", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 50, PeriodSeconds: 60}, + }, + }, + expectedLimit: 15, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + limit := calculateScaleUpLimitWithScalingRules(tt.currentReplicas, tt.scaleUpEvents, tt.scaleDownEvents, tt.scalingRules) + assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale up limit") + }) + } +} + +// TestCalculateScaleDownLimitWithBehaviors checks scale-down limit calculation with behaviors +func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) { + baseTime := time.Now() + disabledPolicy := autoscalingv2.DisabledPolicySelect + minChangePolicy := autoscalingv2.MinChangePolicySelect + + tests := []struct { + name string + currentReplicas int32 + scaleUpEvents []timestampedScaleEvent + scaleDownEvents []timestampedScaleEvent + scalingRules *autoscalingv2.HPAScalingRules + expectedLimit int32 + }{ + { + name: "No previous events", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60}, + }, + }, + expectedLimit: 8, + }, + { + name: "With previous scale down event", + currentReplicas: 10, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{ + {replicaChange: 1, timestamp: baseTime.Add(-30 * time.Second)}, + }, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60}, + }, + }, + expectedLimit: 8, + }, + { + name: "Multiple policies", + currentReplicas: 100, + scaleUpEvents: []timestampedScaleEvent{}, + scaleDownEvents: []timestampedScaleEvent{}, + scalingRules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + {Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60}, + {Type: autoscalingv2.PodsScalingPolicy, Value: 5, 
+// TestCalculateScaleDownLimitWithBehaviors checks scale-down limit calculation with behaviors
+func TestCalculateScaleDownLimitWithBehaviors(t *testing.T) {
+	baseTime := time.Now()
+	disabledPolicy := autoscalingv2.DisabledPolicySelect
+	minChangePolicy := autoscalingv2.MinChangePolicySelect
+
+	tests := []struct {
+		name string
+		currentReplicas int32
+		scaleUpEvents []timestampedScaleEvent
+		scaleDownEvents []timestampedScaleEvent
+		scalingRules *autoscalingv2.HPAScalingRules
+		expectedLimit int32
+	}{
+		{
+			name: "No previous events",
+			currentReplicas: 10,
+			scaleUpEvents: []timestampedScaleEvent{},
+			scaleDownEvents: []timestampedScaleEvent{},
+			scalingRules: &autoscalingv2.HPAScalingRules{
+				Policies: []autoscalingv2.HPAScalingPolicy{
+					{Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60},
+				},
+			},
+			expectedLimit: 8,
+		},
+		{
+			name: "With previous scale down event",
+			currentReplicas: 10,
+			scaleUpEvents: []timestampedScaleEvent{},
+			scaleDownEvents: []timestampedScaleEvent{
+				{replicaChange: 1, timestamp: baseTime.Add(-30 * time.Second)},
+			},
+			scalingRules: &autoscalingv2.HPAScalingRules{
+				Policies: []autoscalingv2.HPAScalingPolicy{
+					{Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60},
+				},
+			},
+			expectedLimit: 8,
+		},
+		{
+			name: "Multiple policies",
+			currentReplicas: 100,
+			scaleUpEvents: []timestampedScaleEvent{},
+			scaleDownEvents: []timestampedScaleEvent{},
+			scalingRules: &autoscalingv2.HPAScalingRules{
+				Policies: []autoscalingv2.HPAScalingPolicy{
+					{Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60},
+					{Type: autoscalingv2.PodsScalingPolicy, Value: 5, PeriodSeconds: 60},
+				},
+			},
+			expectedLimit: 90,
+		},
+		{
+			name: "Disabled policy",
+			currentReplicas: 10,
+			scaleUpEvents: []timestampedScaleEvent{},
+			scaleDownEvents: []timestampedScaleEvent{},
+			scalingRules: &autoscalingv2.HPAScalingRules{
+				SelectPolicy: &disabledPolicy,
+				Policies: []autoscalingv2.HPAScalingPolicy{
+					{Type: autoscalingv2.PercentScalingPolicy, Value: 20, PeriodSeconds: 60},
+				},
+			},
+			expectedLimit: 10,
+		},
+		{
+			name: "MinChange policy",
+			currentReplicas: 100,
+			scaleUpEvents: []timestampedScaleEvent{},
+			scaleDownEvents: []timestampedScaleEvent{},
+			scalingRules: &autoscalingv2.HPAScalingRules{
+				SelectPolicy: &minChangePolicy,
+				Policies: []autoscalingv2.HPAScalingPolicy{
+					{Type: autoscalingv2.PercentScalingPolicy, Value: 10, PeriodSeconds: 60},
+					{Type: autoscalingv2.PodsScalingPolicy, Value: 15, PeriodSeconds: 60},
+				},
+			},
+			expectedLimit: 90,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			limit := calculateScaleDownLimitWithBehaviors(tt.currentReplicas, tt.scaleUpEvents, tt.scaleDownEvents, tt.scalingRules)
+			assert.Equal(t, tt.expectedLimit, limit, "Unexpected scale down limit")
+		})
+	}
+}
+
+// TestSetCurrentReplicasInStatus verifies setting of current replicas in HPA status
+func TestSetCurrentReplicasInStatus(t *testing.T) {
+	controller := &FHPAController{}
+	hpa := &autoscalingv1alpha1.FederatedHPA{
+		Status: autoscalingv2.HorizontalPodAutoscalerStatus{
+			DesiredReplicas: 5,
+			CurrentMetrics: []autoscalingv2.MetricStatus{
+				{Type: autoscalingv2.ResourceMetricSourceType},
+			},
+		},
+	}
+
+	controller.setCurrentReplicasInStatus(hpa, 3)
+
+	assert.Equal(t, int32(3), hpa.Status.CurrentReplicas)
+	assert.Equal(t, int32(5), hpa.Status.DesiredReplicas)
+	assert.Len(t, hpa.Status.CurrentMetrics, 1)
+	assert.Nil(t, hpa.Status.LastScaleTime)
+}
+
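+// Note: the assertions above also pin down what setCurrentReplicasInStatus
+// must not touch: DesiredReplicas, CurrentMetrics, and LastScaleTime are
+// expected to pass through unchanged; only CurrentReplicas is updated.
+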
+// TestSetStatus ensures correct status setting for FederatedHPA
+func TestSetStatus(t *testing.T) {
+	tests := []struct {
+		name string
+		currentReplicas int32
+		desiredReplicas int32
+		metricStatuses []autoscalingv2.MetricStatus
+		rescale bool
+		initialLastScale *metav1.Time
+	}{
+		{
+			name: "Update without rescale",
+			currentReplicas: 3,
+			desiredReplicas: 5,
+			metricStatuses: []autoscalingv2.MetricStatus{
+				{Type: autoscalingv2.ResourceMetricSourceType},
+			},
+			rescale: false,
+			initialLastScale: nil,
+		},
+		{
+			name: "Update with rescale",
+			currentReplicas: 3,
+			desiredReplicas: 5,
+			metricStatuses: []autoscalingv2.MetricStatus{
+				{Type: autoscalingv2.ResourceMetricSourceType},
+			},
+			rescale: true,
+			initialLastScale: &metav1.Time{Time: time.Now().Add(-1 * time.Hour)},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			controller := &FHPAController{}
+			hpa := &autoscalingv1alpha1.FederatedHPA{
+				Status: autoscalingv2.HorizontalPodAutoscalerStatus{
+					LastScaleTime: tt.initialLastScale,
+					Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
+						{Type: autoscalingv2.ScalingActive},
+					},
+				},
+			}
+
+			controller.setStatus(hpa, tt.currentReplicas, tt.desiredReplicas, tt.metricStatuses, tt.rescale)
+
+			assert.Equal(t, tt.currentReplicas, hpa.Status.CurrentReplicas)
+			assert.Equal(t, tt.desiredReplicas, hpa.Status.DesiredReplicas)
+			assert.Equal(t, tt.metricStatuses, hpa.Status.CurrentMetrics)
+			assert.Len(t, hpa.Status.Conditions, 1)
+
+			if tt.rescale {
+				assert.NotNil(t, hpa.Status.LastScaleTime)
+				assert.True(t, hpa.Status.LastScaleTime.After(time.Now().Add(-1*time.Second)))
+			} else {
+				assert.Equal(t, tt.initialLastScale, hpa.Status.LastScaleTime)
+			}
+		})
+	}
+}
+
+// TestSetCondition verifies proper condition setting in FederatedHPA
+func TestSetCondition(t *testing.T) {
+	tests := []struct {
+		name string
+		initialHPA *autoscalingv1alpha1.FederatedHPA
+		conditionType autoscalingv2.HorizontalPodAutoscalerConditionType
+		status corev1.ConditionStatus
+		reason string
+		message string
+		args []interface{}
+		expectedLength int
+		checkIndex int
+	}{
+		{
+			name: "Add new condition",
+			initialHPA: &autoscalingv1alpha1.FederatedHPA{},
+			conditionType: autoscalingv2.ScalingActive,
+			status: corev1.ConditionTrue,
+			reason: "TestReason",
+			message: "Test message",
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+		{
+			name: "Update existing condition",
+			initialHPA: &autoscalingv1alpha1.FederatedHPA{
+				Status: autoscalingv2.HorizontalPodAutoscalerStatus{
+					Conditions: []autoscalingv2.HorizontalPodAutoscalerCondition{
+						{
+							Type: autoscalingv2.ScalingActive,
+							Status: corev1.ConditionFalse,
+						},
+					},
+				},
+			},
+			conditionType: autoscalingv2.ScalingActive,
+			status: corev1.ConditionTrue,
+			reason: "UpdatedReason",
+			message: "Updated message",
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+		{
+			name: "Add condition with formatted message",
+			initialHPA: &autoscalingv1alpha1.FederatedHPA{},
+			conditionType: autoscalingv2.AbleToScale,
+			status: corev1.ConditionTrue,
+			reason: "FormattedReason",
+			message: "Formatted message: %d",
+			args: []interface{}{42},
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			setCondition(tt.initialHPA, tt.conditionType, tt.status, tt.reason, tt.message, tt.args...)
+
+			assert.Len(t, tt.initialHPA.Status.Conditions, tt.expectedLength, "Unexpected number of conditions")
+
+			condition := tt.initialHPA.Status.Conditions[tt.checkIndex]
+			assert.Equal(t, tt.conditionType, condition.Type, "Unexpected condition type")
+			assert.Equal(t, tt.status, condition.Status, "Unexpected condition status")
+			assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason")
+
+			expectedMessage := tt.message
+			if len(tt.args) > 0 {
+				expectedMessage = fmt.Sprintf(tt.message, tt.args...)
+			}
+			assert.Equal(t, expectedMessage, condition.Message, "Unexpected condition message")
+			assert.False(t, condition.LastTransitionTime.IsZero(), "LastTransitionTime should be set")
+		})
+	}
+}
+
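+// Note: the "Update condition without changing status" case below is the
+// subtle one: when the condition status is unchanged, setConditionInList is
+// expected to preserve the original LastTransitionTime instead of stamping a
+// new one.
+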
+// TestSetConditionInList ensures proper condition setting in a list of conditions
+func TestSetConditionInList(t *testing.T) {
+	tests := []struct {
+		name string
+		inputList []autoscalingv2.HorizontalPodAutoscalerCondition
+		conditionType autoscalingv2.HorizontalPodAutoscalerConditionType
+		status corev1.ConditionStatus
+		reason string
+		message string
+		args []interface{}
+		expectedLength int
+		checkIndex int
+	}{
+		{
+			name: "Add new condition",
+			inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{},
+			conditionType: autoscalingv2.ScalingActive,
+			status: corev1.ConditionTrue,
+			reason: "TestReason",
+			message: "Test message",
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+		{
+			name: "Update existing condition",
+			inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{
+				{
+					Type: autoscalingv2.ScalingActive,
+					Status: corev1.ConditionFalse,
+				},
+			},
+			conditionType: autoscalingv2.ScalingActive,
+			status: corev1.ConditionTrue,
+			reason: "UpdatedReason",
+			message: "Updated message",
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+		{
+			name: "Add condition with formatted message",
+			inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{
+				{
+					Type: autoscalingv2.ScalingActive,
+					Status: corev1.ConditionTrue,
+				},
+			},
+			conditionType: autoscalingv2.AbleToScale,
+			status: corev1.ConditionTrue,
+			reason: "FormattedReason",
+			message: "Formatted message: %d",
+			args: []interface{}{42},
+			expectedLength: 2,
+			checkIndex: 1,
+		},
+		{
+			name: "Update condition without changing status",
+			inputList: []autoscalingv2.HorizontalPodAutoscalerCondition{
+				{
+					Type: autoscalingv2.ScalingActive,
+					Status: corev1.ConditionTrue,
+					LastTransitionTime: metav1.Now(),
+				},
+			},
+			conditionType: autoscalingv2.ScalingActive,
+			status: corev1.ConditionTrue,
+			reason: "NewReason",
+			message: "New message",
+			expectedLength: 1,
+			checkIndex: 0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := setConditionInList(tt.inputList, tt.conditionType, tt.status, tt.reason, tt.message, tt.args...)
+
+			assert.Len(t, result, tt.expectedLength, "Unexpected length of result list")
+
+			condition := result[tt.checkIndex]
+			assert.Equal(t, tt.conditionType, condition.Type, "Unexpected condition type")
+			assert.Equal(t, tt.status, condition.Status, "Unexpected condition status")
+			assert.Equal(t, tt.reason, condition.Reason, "Unexpected condition reason")
+
+			expectedMessage := tt.message
+			if len(tt.args) > 0 {
+				expectedMessage = fmt.Sprintf(tt.message, tt.args...)
+			}
+			assert.Equal(t, expectedMessage, condition.Message, "Unexpected condition message")
+
+			if tt.name == "Update existing condition" {
+				assert.False(t, condition.LastTransitionTime.IsZero(), "LastTransitionTime should be set")
+			}
+
+			if tt.name == "Update condition without changing status" {
+				assert.Equal(t, tt.inputList[0].LastTransitionTime, condition.LastTransitionTime, "LastTransitionTime should not change")
+			}
+		})
+	}
+}
+
+// Helper functions
+
+// getCondition returns the condition of the given type, or nil if absent
+func getCondition(conditions []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType) *autoscalingv2.HorizontalPodAutoscalerCondition {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return &condition
+		}
+	}
+	return nil
+}
+
+// createTestFederatedHPA builds a minimal FederatedHPA with the given name and namespace
+func createTestFederatedHPA(name, namespace string) *autoscalingv1alpha1.FederatedHPA {
+	return &autoscalingv1alpha1.FederatedHPA{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Namespace: namespace,
+		},
+	}
+}
+
+// createTestHPA builds a FederatedHPA spec with the given replica bounds and behavior
+func createTestHPA(minReplicas, maxReplicas int32, behavior *autoscalingv2.HorizontalPodAutoscalerBehavior) *autoscalingv1alpha1.FederatedHPA {
+	return &autoscalingv1alpha1.FederatedHPA{
+		Spec: autoscalingv1alpha1.FederatedHPASpec{
+			MinReplicas: &minReplicas,
+			MaxReplicas: maxReplicas,
+			Behavior: behavior,
+		},
+	}
+}
+
+// createTestScalingRules builds HPAScalingRules from the given window, select policy, and policies
+func createTestScalingRules(stabilizationWindowSeconds *int32, selectPolicy *autoscalingv2.ScalingPolicySelect, policies []autoscalingv2.HPAScalingPolicy) *autoscalingv2.HPAScalingRules {
+	return &autoscalingv2.HPAScalingRules{
+		StabilizationWindowSeconds: stabilizationWindowSeconds,
+		SelectPolicy: selectPolicy,
+		Policies: policies,
+	}
+}
+
+// countOldRecommendations counts recommendations older than the given window
+func countOldRecommendations(recommendations []timestampedRecommendation, window time.Duration) int {
+	count := 0
+	now := time.Now()
+	for _, rec := range recommendations {
+		if rec.timestamp.Before(now.Add(-window)) {
+			count++
+		}
+	}
+	return count
+}
+
+// containsRecommendation reports whether the slice contains the given recommendation value
+func containsRecommendation(slice []timestampedRecommendation, recommendation int32) bool {
+	for _, item := range slice {
+		if item.recommendation == recommendation {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/controllers/federatedhpa/metrics/client_test.go b/pkg/controllers/federatedhpa/metrics/client_test.go
new file mode 100644
index 000000000000..e3824ad3a5c5
--- /dev/null
+++ b/pkg/controllers/federatedhpa/metrics/client_test.go
@@ -0,0 +1,452 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+	"errors"
+	"testing"
+	"time"
+
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
+	externalapi "k8s.io/metrics/pkg/apis/external_metrics/v1beta1"
+	"k8s.io/metrics/pkg/apis/metrics/v1beta1"
+	resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
+	customclient "k8s.io/metrics/pkg/client/custom_metrics"
+	externalclient "k8s.io/metrics/pkg/client/external_metrics"
+)
+
+// Mock clients and interfaces
+type mockResourceClient struct {
+	resourceclient.PodMetricsesGetter
+}
+
+type mockCustomClient struct {
+	customclient.CustomMetricsClient
+}
+
+type mockExternalClient struct {
+	externalclient.ExternalMetricsClient
+}
+
+type mockExternalMetricsClient struct {
+	externalclient.ExternalMetricsClient
+	metrics *externalapi.ExternalMetricValueList
+	err error
+}
+
+type mockExternalMetricsInterface struct {
+	externalclient.MetricsInterface
+	metrics *externalapi.ExternalMetricValueList
+	err error
+}
+
+type mockCustomMetricsClient struct {
+	customclient.CustomMetricsClient
+	metrics *customapi.MetricValueList
+	err error
+}
+
+type mockCustomMetricsInterface struct {
+	customclient.MetricsInterface
+	metrics *customapi.MetricValueList
+	err error
+}
+
+type mockPodMetricsGetter struct {
+	metrics *v1beta1.PodMetricsList
+	err error
+}
+
+type mockPodMetricsInterface struct {
+	resourceclient.PodMetricsInterface
+	metrics *v1beta1.PodMetricsList
+	err error
+}
+
+func (m *mockExternalMetricsClient) NamespacedMetrics(_ string) externalclient.MetricsInterface {
+	return &mockExternalMetricsInterface{metrics: m.metrics, err: m.err}
+}
+
+func (m *mockExternalMetricsInterface) List(_ string, _ labels.Selector) (*externalapi.ExternalMetricValueList, error) {
+	return m.metrics, m.err
+}
+
+func (m *mockCustomMetricsClient) NamespacedMetrics(_ string) customclient.MetricsInterface {
+	return &mockCustomMetricsInterface{metrics: m.metrics, err: m.err}
+}
+
+func (m *mockCustomMetricsInterface) GetForObjects(_ schema.GroupKind, _ labels.Selector, _ string, _ labels.Selector) (*customapi.MetricValueList, error) {
+	return m.metrics, m.err
+}
+
+func (m *mockCustomMetricsInterface) GetForObject(_ schema.GroupKind, _ string, _ string, _ labels.Selector) (*customapi.MetricValue, error) {
+	if len(m.metrics.Items) > 0 {
+		return &m.metrics.Items[0], m.err
+	}
+	return nil, m.err
+}
+
+func (m *mockPodMetricsGetter) PodMetricses(_ string) resourceclient.PodMetricsInterface {
+	return &mockPodMetricsInterface{metrics: m.metrics, err: m.err}
+}
+
+func (m *mockPodMetricsInterface) List(_ context.Context, _ metav1.ListOptions) (*v1beta1.PodMetricsList, error) {
+	return m.metrics, m.err
+}
+
+// Test functions
+
+// TestNewRESTMetricsClient verifies that NewRESTMetricsClient accepts the resource, custom, and external clients and returns a non-nil client.
+func TestNewRESTMetricsClient(t *testing.T) {
+	resourceClient := &mockResourceClient{}
+	customClient := &mockCustomClient{}
+	externalClient := &mockExternalClient{}
+
+	client := NewRESTMetricsClient(resourceClient, customClient, externalClient)
+
+	if client == nil {
+		t.Error("Expected non-nil client, got nil")
+	}
+}
+
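+// Note: createPodMetrics (defined with the helpers below) builds CPU usage
+// with resource.NewMilliQuantity, so the PodMetricsInfo values asserted in
+// the following tests are in milli-units (a Value of 100 means 100m CPU).
+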
+// TestGetResourceMetric tests the GetResourceMetric function with various scenarios.
+func TestGetResourceMetric(t *testing.T) {
+	tests := []struct {
+		name string
+		mockMetrics *v1beta1.PodMetricsList
+		mockError error
+		container string
+		expectedError string
+		expectedResult PodMetricsInfo
+	}{
+		{
+			name: "Successful retrieval",
+			mockMetrics: &v1beta1.PodMetricsList{
+				Items: []v1beta1.PodMetrics{
+					createPodMetrics("pod1", "container1", 100),
+				},
+			},
+			expectedResult: PodMetricsInfo{
+				"pod1": {Value: 100},
+			},
+		},
+		{
+			name: "API error",
+			mockError: errors.New("API error"),
+			expectedError: "unable to fetch metrics from resource metrics API: API error",
+		},
+		{
+			name: "Empty metrics",
+			mockMetrics: &v1beta1.PodMetricsList{},
+			expectedError: "no metrics returned from resource metrics API",
+		},
+		{
+			name: "Container-specific metrics",
+			mockMetrics: &v1beta1.PodMetricsList{
+				Items: []v1beta1.PodMetrics{
+					{
+						ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"},
+						Containers: []v1beta1.ContainerMetrics{
+							createPodMetrics("pod1", "container1", 100).Containers[0],
+							createPodMetrics("pod1", "container2", 200).Containers[0],
+						},
+					},
+				},
+			},
+			container: "container2",
+			expectedResult: PodMetricsInfo{
+				"pod1": {Value: 200},
+			},
+		},
+		{
+			name: "Container not found",
+			mockMetrics: &v1beta1.PodMetricsList{
+				Items: []v1beta1.PodMetrics{
+					createPodMetrics("pod1", "container1", 100),
+				},
+			},
+			container: "nonexistent",
+			expectedError: "failed to get container metrics: container nonexistent not present in metrics for pod default/pod1",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := setupMockResourceClient(tt.mockMetrics, tt.mockError)
+			result, _, err := client.GetResourceMetric(context.Background(), corev1.ResourceCPU, "default", labels.Everything(), tt.container)
+
+			assertError(t, err, tt.expectedError)
+			assertPodMetricsInfoEqual(t, result, tt.expectedResult)
+		})
+	}
+}
+
+// TestGetExternalMetric tests the retrieval of external metrics.
+func TestGetExternalMetric(t *testing.T) {
+	tests := []struct {
+		name string
+		mockMetrics *externalapi.ExternalMetricValueList
+		mockError error
+		expectedValues []int64
+		expectedError string
+	}{
+		{
+			name: "Successful retrieval",
+			mockMetrics: &externalapi.ExternalMetricValueList{
+				Items: []externalapi.ExternalMetricValue{
+					{Value: *resource.NewQuantity(100, resource.DecimalSI)},
+					{Value: *resource.NewQuantity(200, resource.DecimalSI)},
+				},
+			},
+			expectedValues: []int64{100000, 200000},
+		},
+		{
+			name: "API error",
+			mockError: errors.New("API error"),
+			expectedError: "unable to fetch metrics from external metrics API: API error",
+		},
+		{
+			name: "Empty metrics",
+			mockMetrics: &externalapi.ExternalMetricValueList{},
+			expectedError: "no metrics returned from external metrics API",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := setupMockExternalClient(tt.mockMetrics, tt.mockError)
+			values, _, err := client.GetExternalMetric("test-metric", "default", labels.Everything())
+
+			assertError(t, err, tt.expectedError)
+			assertInt64SliceEqual(t, values, tt.expectedValues)
+		})
+	}
+}
+
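+// Note: the external and custom metric fixtures are built with
+// resource.NewQuantity, and the asserted values are 1000x larger
+// (100 -> 100000), which suggests the client reports Quantity.MilliValue();
+// this is inferred from the fixtures rather than documented behavior.
+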
+// TestGetRawMetric tests the retrieval of raw custom metrics.
+func TestGetRawMetric(t *testing.T) {
+	tests := []struct {
+		name string
+		mockMetrics *customapi.MetricValueList
+		mockError error
+		expectedResult PodMetricsInfo
+		expectedError string
+	}{
+		{
+			name: "Successful retrieval",
+			mockMetrics: &customapi.MetricValueList{
+				Items: []customapi.MetricValue{
+					{
+						DescribedObject: corev1.ObjectReference{
+							Kind: "Pod",
+							Name: "pod1",
+							APIVersion: "v1",
+						},
+						Metric: customapi.MetricIdentifier{
+							Name: "test-metric",
+						},
+						Timestamp: metav1.Time{Time: time.Now()},
+						Value: *resource.NewQuantity(100, resource.DecimalSI),
+					},
+				},
+			},
+			expectedResult: PodMetricsInfo{
+				"pod1": {Value: 100000},
+			},
+		},
+		{
+			name: "API error",
+			mockError: errors.New("API error"),
+			expectedError: "unable to fetch metrics from custom metrics API: API error",
+		},
+		{
+			name: "Empty metrics",
+			mockMetrics: &customapi.MetricValueList{},
+			expectedError: "no metrics returned from custom metrics API",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := setupMockCustomClient(tt.mockMetrics, tt.mockError)
+			result, _, err := client.GetRawMetric("test-metric", "default", labels.Everything(), labels.Everything())
+
+			assertError(t, err, tt.expectedError)
+			assertPodMetricsInfoEqual(t, result, tt.expectedResult)
+		})
+	}
+}
+
+// TestGetObjectMetric tests the retrieval of object-specific custom metrics.
+func TestGetObjectMetric(t *testing.T) {
+	tests := []struct {
+		name string
+		mockMetrics *customapi.MetricValueList
+		mockError error
+		objectRef *autoscalingv2.CrossVersionObjectReference
+		expectedValue int64
+		expectedError string
+	}{
+		{
+			name: "Successful retrieval",
+			mockMetrics: &customapi.MetricValueList{
+				Items: []customapi.MetricValue{
+					{
+						DescribedObject: corev1.ObjectReference{
+							Kind: "Deployment",
+							Name: "test-deployment",
+							APIVersion: "apps/v1",
+						},
+						Metric: customapi.MetricIdentifier{
+							Name: "test-metric",
+						},
+						Timestamp: metav1.Time{Time: time.Now()},
+						Value: *resource.NewQuantity(100, resource.DecimalSI),
+					},
+				},
+			},
+			objectRef: &autoscalingv2.CrossVersionObjectReference{
+				Kind: "Deployment",
+				Name: "test-deployment",
+				APIVersion: "apps/v1",
+			},
+			expectedValue: 100000,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			client := setupMockCustomClient(tt.mockMetrics, tt.mockError)
+			value, _, err := client.GetObjectMetric("test-metric", "default", tt.objectRef, labels.Everything())
+
+			assertError(t, err, tt.expectedError)
+			assertInt64Equal(t, value, tt.expectedValue)
+		})
+	}
+}
+
+// Helper functions
+
+// createPodMetrics creates a PodMetrics object with specified name, container name, and CPU value
+func createPodMetrics(name string, containerName string, cpuValue int64) v1beta1.PodMetrics {
+	return v1beta1.PodMetrics{
+		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "default"},
+		Timestamp: metav1.Time{Time: time.Now()},
+		Window: metav1.Duration{Duration: time.Minute},
+		Containers: []v1beta1.ContainerMetrics{
+			{
+				Name: containerName,
+				Usage: corev1.ResourceList{
+					corev1.ResourceCPU: *resource.NewMilliQuantity(cpuValue, resource.DecimalSI),
+				},
+			},
+		},
+	}
+}
+
+// setupMockResourceClient creates a mock resource metrics client for testing
+func setupMockResourceClient(mockMetrics *v1beta1.PodMetricsList, mockError error) *resourceMetricsClient {
+	mockClient := &mockResourceClient{}
+	mockClient.PodMetricsesGetter = &mockPodMetricsGetter{
+		metrics: mockMetrics,
+		err: mockError,
+	}
+	return &resourceMetricsClient{client: mockClient}
+}
+
+// setupMockExternalClient creates a mock external metrics client for testing
+func setupMockExternalClient(mockMetrics *externalapi.ExternalMetricValueList, mockError error) *externalMetricsClient {
+	mockClient := &mockExternalMetricsClient{
+		metrics: mockMetrics,
+		err: mockError,
+	}
+	return &externalMetricsClient{client: mockClient}
+}
+
+// setupMockCustomClient creates a mock custom metrics client for testing
+func setupMockCustomClient(mockMetrics *customapi.MetricValueList, mockError error) *customMetricsClient {
+	mockClient := &mockCustomMetricsClient{
+		metrics: mockMetrics,
+		err: mockError,
+	}
+	return &customMetricsClient{client: mockClient}
+}
+
+// assertError checks if the error matches the expected error string
+func assertError(t *testing.T, got error, want string) {
+	if want == "" {
+		if got != nil {
+			t.Errorf("Unexpected error: %v", got)
+		}
+	} else if got == nil || got.Error() != want {
+		t.Errorf("Expected error '%s', got '%v'", want, got)
+	}
+}
+
+// assertPodMetricsInfoEqual compares two PodMetricsInfo objects for equality
+func assertPodMetricsInfoEqual(t *testing.T, got, want PodMetricsInfo) {
+	if !podMetricsInfoEqual(got, want) {
+		t.Errorf("Expected result %v, got %v", want, got)
+	}
+}
+
+// assertInt64SliceEqual compares two int64 slices for equality
+func assertInt64SliceEqual(t *testing.T, got, want []int64) {
+	if !int64SliceEqual(got, want) {
+		t.Errorf("Expected values %v, got %v", want, got)
+	}
+}
+
+// assertInt64Equal compares two int64 values for equality
+func assertInt64Equal(t *testing.T, got, want int64) {
+	if got != want {
+		t.Errorf("Expected value %d, got %d", want, got)
+	}
+}
+
+// int64SliceEqual checks if two int64 slices are equal
+func int64SliceEqual(a, b []int64) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// podMetricsInfoEqual checks if two PodMetricsInfo objects are equal
+func podMetricsInfoEqual(a, b PodMetricsInfo) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for k, v := range a {
+		if bv, ok := b[k]; !ok || v.Value != bv.Value {
+			return false
+		}
+	}
+	return true
+}
diff --git a/pkg/controllers/federatedhpa/replica_calculator_test.go b/pkg/controllers/federatedhpa/replica_calculator_test.go
new file mode 100644
index 000000000000..2814939259df
--- /dev/null
+++ b/pkg/controllers/federatedhpa/replica_calculator_test.go
@@ -0,0 +1,1478 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package federatedhpa
+
+import (
+	"context"
+	"fmt"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/mock"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	metricsclient "github.com/karmada-io/karmada/pkg/controllers/federatedhpa/metrics"
+)
+
+// MockQueryClient implements a mock for the metrics client
+type MockQueryClient struct {
+	mock.Mock
+}
+
+func (m *MockQueryClient) GetResourceMetric(ctx context.Context, resource corev1.ResourceName, namespace string, selector labels.Selector, container string) (metricsclient.PodMetricsInfo, time.Time, error) {
+	args := m.Called(ctx, resource, namespace, selector, container)
+	return args.Get(0).(metricsclient.PodMetricsInfo), args.Get(1).(time.Time), args.Error(2)
+}
+
+func (m *MockQueryClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (metricsclient.PodMetricsInfo, time.Time, error) {
+	args := m.Called(metricName, namespace, selector, metricSelector)
+	return args.Get(0).(metricsclient.PodMetricsInfo), args.Get(1).(time.Time), args.Error(2)
+}
+
+func (m *MockQueryClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscalingv2.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
+	args := m.Called(metricName, namespace, objectRef, metricSelector)
+	return args.Get(0).(int64), args.Get(1).(time.Time), args.Error(2)
+}
+
+func (m *MockQueryClient) GetExternalMetric(metricName string, namespace string, selector labels.Selector) ([]int64, time.Time, error) {
+	args := m.Called(metricName, namespace, selector)
+	return args.Get(0).([]int64), args.Get(1).(time.Time), args.Error(2)
+}
+
+// TestNewReplicaCalculator verifies the creation of a new ReplicaCalculator
+func TestNewReplicaCalculator(t *testing.T) {
+	const (
+		defaultTolerance = 0.1
+		defaultCPUInitPeriod = 5 * time.Minute
+		defaultDelayInitReadinessStatus = 30 * time.Second
+	)
+
+	tests := []struct {
+		name string
+		tolerance float64
+		cpuInitPeriod time.Duration
+		delayInitReadinessStatus time.Duration
+	}{
+		{
+			name: "Default values",
+			tolerance: defaultTolerance,
+			cpuInitPeriod: defaultCPUInitPeriod,
+			delayInitReadinessStatus: defaultDelayInitReadinessStatus,
+		},
+		{
+			name: "Zero values",
+			tolerance: 0,
+			cpuInitPeriod: 0,
+			delayInitReadinessStatus: 0,
+		},
+		{
+			name: "Custom values",
+			tolerance: 0.2,
+			cpuInitPeriod: 10 * time.Minute,
+			delayInitReadinessStatus: 1 * time.Minute,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(mockClient,
+				tt.tolerance,
+				tt.cpuInitPeriod,
+				tt.delayInitReadinessStatus)
+
+			assert.NotNil(t, calculator, "Calculator should not be nil")
+			assert.Equal(t, mockClient, calculator.metricsClient, "Metrics client should match")
+			assert.Equal(t, tt.tolerance, calculator.tolerance, "Tolerance should match")
+			assert.Equal(t, tt.cpuInitPeriod, calculator.cpuInitializationPeriod, "CPU initialization period should match")
+			assert.Equal(t, tt.delayInitReadinessStatus, calculator.delayOfInitialReadinessStatus, "Delay of initial readiness status should match")
+		})
+	}
+}
+
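+// Note: MockQueryClient above is a hand-rolled testify mock. Each test
+// registers an expectation with On(...).Return(...).Once(), and the closing
+// AssertExpectations call fails the test if the metrics client was not
+// invoked exactly as declared.
+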
+// TestGetResourceReplicas checks the calculation of resource-based replicas
+func TestGetResourceReplicas(t *testing.T) {
+	const (
+		defaultNamespace = "default"
+		defaultContainer = ""
+		defaultCalibration = 1.0
+		defaultTolerance = 0.1
+	)
+
+	mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
+
+	testCases := []struct {
+		name string
+		currentReplicas int32
+		targetUtilization int32
+		pods []*corev1.Pod
+		metrics metricsclient.PodMetricsInfo
+		expectedReplicas int32
+		expectedUtilization int32
+		expectedRawUtilization int64
+		calibration float64
+		tolerance float64
+		expectError bool
+	}{
+		{
+			name: "Scale up",
+			currentReplicas: 2,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150},
+				"pod2": {Value: 150},
+			},
+			expectedReplicas: 6,
+			expectedUtilization: 150,
+			expectedRawUtilization: 150,
+			calibration: defaultCalibration,
+			tolerance: defaultTolerance,
+		},
+		{
+			name: "Scale down",
+			currentReplicas: 4,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createPod("pod4", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 50},
+				"pod2": {Value: 50},
+				"pod3": {Value: 50},
+				"pod4": {Value: 50},
+			},
+			expectedReplicas: 4,
+			expectedUtilization: 50,
+			expectedRawUtilization: 50,
+			calibration: defaultCalibration,
+			tolerance: defaultTolerance,
+		},
+		{
+			name: "No change (within tolerance)",
+			currentReplicas: 2,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 52},
+				"pod2": {Value: 48},
+			},
+			expectedReplicas: 2,
+			expectedUtilization: 50,
+			expectedRawUtilization: 50,
+			calibration: defaultCalibration,
+			tolerance: defaultTolerance,
+		},
+		{
+			name: "Scale up with unready pods",
+			currentReplicas: 3,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createUnreadyPod("pod3", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150},
+				"pod2": {Value: 150},
+			},
+			expectedReplicas: 6,
+			expectedUtilization: 150,
+			expectedRawUtilization: 150,
+			calibration: defaultCalibration,
+			tolerance: defaultTolerance,
+		},
+		{
+			name: "Scale with calibration",
+			currentReplicas: 2,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150},
+				"pod2": {Value: 150},
+			},
+			expectedReplicas: 12,
+			expectedUtilization: 150,
+			expectedRawUtilization: 150,
+			calibration: 0.5,
+			tolerance: defaultTolerance,
+		},
+		{
+			name: "Error: No pods",
+			currentReplicas: 2,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{},
+			metrics: metricsclient.PodMetricsInfo{},
+			expectError: true,
+		},
+		{
+			name: "Error: No metrics for ready pods",
+			currentReplicas: 2,
+			targetUtilization: 50,
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{},
+			expectError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(mockClient,
+				tc.tolerance,
+				5*time.Minute,
+				30*time.Second)
+
+			mockClient.On("GetResourceMetric", mock.Anything, corev1.ResourceCPU, defaultNamespace, labels.Everything(), defaultContainer).
+				Return(tc.metrics, mockTime, nil).Once()
+
+			replicas, utilization, rawUtilization, timestamp, err := calculator.GetResourceReplicas(
+				context.Background(), tc.currentReplicas, tc.targetUtilization, corev1.ResourceCPU,
+				defaultNamespace, labels.Everything(), defaultContainer, tc.pods, tc.calibration)
+
+			if tc.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas)
+				assert.Equal(t, tc.expectedUtilization, utilization)
+				assert.Equal(t, tc.expectedRawUtilization, rawUtilization)
+				assert.Equal(t, mockTime, timestamp)
+			}
+
+			mockClient.AssertExpectations(t)
+		})
+	}
+}
+
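+// Note on the utilization arithmetic above: every pod requests 100m CPU, so
+// a usage of 150 (milli-units) is 150% utilization against the 50% target,
+// and ceil(2 * 150/50) = 6 replicas; the calibration factor appears to
+// divide into the result, so 0.5 doubles the expectation to 12.
+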
+// TestGetRawResourceReplicas verifies the calculation of raw resource-based replicas
+func TestGetRawResourceReplicas(t *testing.T) {
+	const (
+		defaultNamespace = "default"
+		defaultContainer = ""
+		cpuInitializationPeriod = 5 * time.Minute
+		delayOfInitialReadinessStatus = 30 * time.Second
+	)
+
+	mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
+
+	testCases := []struct {
+		name string
+		currentReplicas int32
+		targetUsage int64
+		resource corev1.ResourceName
+		namespace string
+		selector labels.Selector
+		container string
+		podList []*corev1.Pod
+		metrics metricsclient.PodMetricsInfo
+		calibration float64
+		expectedReplicas int32
+		expectedUsage int64
+		expectError bool
+	}{
+		{
+			name: "Scale up based on raw metrics",
+			currentReplicas: 2,
+			targetUsage: 100,
+			resource: corev1.ResourceCPU,
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			container: defaultContainer,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150},
+				"pod2": {Value: 150},
+			},
+			calibration: 1.0,
+			expectedReplicas: 3,
+			expectedUsage: 150,
+			expectError: false,
+		},
+		{
+			name: "Scale down based on raw metrics",
+			currentReplicas: 4,
+			targetUsage: 100,
+			resource: corev1.ResourceCPU,
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			container: defaultContainer,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createPod("pod4", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 50},
+				"pod2": {Value: 50},
+				"pod3": {Value: 50},
+				"pod4": {Value: 50},
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 50,
+			expectError: false,
+		},
+		{
+			name: "No change (at target usage)",
+			currentReplicas: 2,
+			targetUsage: 100,
+			resource: corev1.ResourceCPU,
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			container: defaultContainer,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 100},
+				"pod2": {Value: 100},
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 100,
+			expectError: false,
+		},
+		{
+			name: "Scale with calibration",
+			currentReplicas: 2,
+			targetUsage: 100,
+			resource: corev1.ResourceCPU,
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			container: defaultContainer,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150},
+				"pod2": {Value: 150},
+			},
+			calibration: 0.8,
+			expectedReplicas: 4,
+			expectedUsage: 150,
+			expectError: false,
+		},
+		{
+			name: "Error: No pods",
+			currentReplicas: 2,
+			targetUsage: 100,
+			resource: corev1.ResourceCPU,
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			container: defaultContainer,
+			podList: []*corev1.Pod{},
+			metrics: metricsclient.PodMetricsInfo{},
+			calibration: 1.0,
+			expectError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(
+				mockClient,
+				0.1,
+				cpuInitializationPeriod,
+				delayOfInitialReadinessStatus,
+			)
+
+			mockClient.On("GetResourceMetric",
+				mock.Anything,
+				tc.resource,
+				tc.namespace,
+				tc.selector,
+				tc.container,
+			).Return(tc.metrics, mockTime, nil).Once()
+
+			replicas, usage, timestamp, err := calculator.GetRawResourceReplicas(
+				context.Background(),
+				tc.currentReplicas,
+				tc.targetUsage,
+				tc.resource,
+				tc.namespace,
+				tc.selector,
+				tc.container,
+				tc.podList,
+				tc.calibration,
+			)
+
+			if tc.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas)
+				assert.Equal(t, tc.expectedUsage, usage)
+				assert.Equal(t, mockTime, timestamp)
+			}
+
+			mockClient.AssertExpectations(t)
+		})
+	}
+}
+
+// TestGetMetricReplicas checks the calculation of metric-based replicas
+func TestGetMetricReplicas(t *testing.T) {
+	const (
+		defaultNamespace = "default"
+		cpuInitializationPeriod = 5 * time.Minute
+		delayOfInitialReadinessStatus = 30 * time.Second
+	)
+
+	mockTime := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
+
+	testCases := []struct {
+		name string
+		currentReplicas int32
+		targetUsage int64
+		metricName string
+		namespace string
+		selector labels.Selector
+		metricSelector labels.Selector
+		podList []*corev1.Pod
+		metrics metricsclient.PodMetricsInfo
+		calibration float64
+		expectedReplicas int32
+		expectedUsage int64
+		expectError bool
+	}{
+		{
+			name: "Scale up based on custom metrics",
+			currentReplicas: 2,
+			targetUsage: 10,
+			metricName: "custom_metric",
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			calibration: 1.0,
+			expectedReplicas: 3,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Scale down based on custom metrics",
+			currentReplicas: 4,
+			targetUsage: 20,
+			metricName: "custom_metric",
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createPod("pod4", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 10},
+				"pod2": {Value: 10},
+				"pod3": {Value: 10},
+				"pod4": {Value: 10},
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 10,
+			expectError: false,
+		},
+		{
+			name: "No change (at target usage)",
+			currentReplicas: 2,
+			targetUsage: 15,
+			metricName: "custom_metric",
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Scale with calibration",
+			currentReplicas: 2,
+			targetUsage: 10,
+			metricName: "custom_metric",
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			calibration: 0.8,
+			expectedReplicas: 4,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Error: No metrics",
+			currentReplicas: 2,
+			targetUsage: 10,
+			metricName: "custom_metric",
+			namespace: defaultNamespace,
+			selector: labels.Everything(),
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{},
+			calibration: 1.0,
+			expectError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(
+				mockClient,
+				0.1,
+				cpuInitializationPeriod,
+				delayOfInitialReadinessStatus,
+			)
+
+			mockClient.On("GetRawMetric",
+				tc.metricName,
+				tc.namespace,
+				tc.selector,
+				tc.metricSelector,
+			).Return(tc.metrics, mockTime, nil).Once()
+
+			replicas, usage, timestamp, err := calculator.GetMetricReplicas(
+				tc.currentReplicas,
+				tc.targetUsage,
+				tc.metricName,
+				tc.namespace,
+				tc.selector,
+				tc.metricSelector,
+				tc.podList,
+				tc.calibration,
+			)
+
+			if tc.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas)
+				assert.Equal(t, tc.expectedUsage, usage)
+				assert.Equal(t, mockTime, timestamp)
+			}
+
+			mockClient.AssertExpectations(t)
+		})
+	}
+}
+
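+// Note: the "Scale up with unready pods" case below is worth tracing.
+// Assuming unready pods are counted as consuming zero on scale up (as in the
+// upstream HPA), the average becomes (15+15+0)/(3*10) = 1.0, which is within
+// tolerance, so the replica count stays at 3 even though the ready pods are
+// above target.
+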
+// TestCalcPlainMetricReplicas verifies the calculation of plain metric-based replicas
+func TestCalcPlainMetricReplicas(t *testing.T) {
+	const (
+		cpuInitializationPeriod = 5 * time.Minute
+		delayOfInitialReadinessStatus = 30 * time.Second
+		defaultTolerance = 0.1
+	)
+
+	testCases := []struct {
+		name string
+		metrics metricsclient.PodMetricsInfo
+		currentReplicas int32
+		targetUsage int64
+		resource corev1.ResourceName
+		podList []*corev1.Pod
+		calibration float64
+		expectedReplicas int32
+		expectedUsage int64
+		expectError bool
+	}{
+		{
+			name: "Scale up",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			currentReplicas: 2,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			calibration: 1.0,
+			expectedReplicas: 3,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Scale down",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 5},
+				"pod2": {Value: 5},
+				"pod3": {Value: 5},
+				"pod4": {Value: 5},
+			},
+			currentReplicas: 4,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createPod("pod4", 100, 200),
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 5,
+			expectError: false,
+		},
+		{
+			name: "No change (within tolerance)",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 10},
+				"pod2": {Value: 10},
+			},
+			currentReplicas: 2,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 10,
+			expectError: false,
+		},
+		{
+			name: "Scale up with unready pods",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			currentReplicas: 3,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createUnreadyPod("pod3", 100, 200),
+			},
+			calibration: 1.0,
+			expectedReplicas: 3,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Scale down with missing pods",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 5},
+				"pod2": {Value: 5},
+			},
+			currentReplicas: 3,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+			},
+			calibration: 1.0,
+			expectedReplicas: 2,
+			expectedUsage: 5,
+			expectError: false,
+		},
+		{
+			name: "Scale with calibration",
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 15},
+				"pod2": {Value: 15},
+			},
+			currentReplicas: 2,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			calibration: 0.8,
+			expectedReplicas: 4,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Error: No pods",
+			metrics: metricsclient.PodMetricsInfo{},
+			currentReplicas: 2,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{},
+			calibration: 1.0,
+			expectError: true,
+		},
+		{
+			name: "Error: No metrics for ready pods",
+			metrics: metricsclient.PodMetricsInfo{},
+			currentReplicas: 2,
+			targetUsage: 10,
+			resource: corev1.ResourceCPU,
+			podList: []*corev1.Pod{
+				createUnreadyPod("pod1", 100, 200),
+				createUnreadyPod("pod2", 100, 200),
+			},
+			calibration: 1.0,
+			expectError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			calculator := NewReplicaCalculator(
+				nil, // metrics client not needed for this test
+				defaultTolerance,
+				cpuInitializationPeriod,
+				delayOfInitialReadinessStatus,
+			)
+
+			replicas, usage, err := calculator.calcPlainMetricReplicas(
+				tc.metrics,
+				tc.currentReplicas,
+				tc.targetUsage,
+				tc.resource,
+				tc.podList,
+				tc.calibration,
+			)
+
+			if tc.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas)
+				assert.Equal(t, tc.expectedUsage, usage)
+			}
+		})
+	}
+}
+
+// Helper function to create an unready pod
+func createUnreadyPod(name string, request, limit int64) *corev1.Pod {
+	pod := createPod(name, request, limit)
+	pod.Status.Conditions = []corev1.PodCondition{
+		{
+			Type: corev1.PodReady,
+			Status: corev1.ConditionFalse,
+		},
+	}
+	return pod
+}
+
+// TestGetObjectMetricReplicas checks the calculation of object metric-based replicas
+func TestGetObjectMetricReplicas(t *testing.T) {
+	mockClient := &MockQueryClient{}
+	calculator := NewReplicaCalculator(mockClient, 0.1, 5*time.Minute, 30*time.Second)
+
+	testCases := []struct {
+		name string
+		currentReplicas int32
+		targetUsage int64
+		metricName string
+		namespace string
+		objectRef *autoscalingv2.CrossVersionObjectReference
+		metricSelector labels.Selector
+		podList []*corev1.Pod
+		objectMetric int64
+		calibration float64
+		expectedReplicas int32
+		expectedError bool
+	}{
+		{
+			name: "Scale up based on object metrics",
+			currentReplicas: 2,
+			targetUsage: 10,
+			metricName: "queue_length",
+			namespace: "default",
+			objectRef: &autoscalingv2.CrossVersionObjectReference{Kind: "Service", Name: "my-svc"},
+			metricSelector: labels.Everything(),
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			objectMetric: 30,
+			calibration: 1.0,
+			expectedReplicas: 6,
+			expectedError: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient.On("GetObjectMetric", tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector).Return(tc.objectMetric, time.Now(), nil).Once()
+
+			replicas, _, _, err := calculator.GetObjectMetricReplicas(tc.currentReplicas, tc.targetUsage, tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector, tc.podList, tc.calibration)
+
+			if tc.expectedError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas)
+			}
+		})
+	}
+}
+
+// TestGetObjectPerPodMetricReplicas verifies the calculation of per-pod object metric-based replicas
+func TestGetObjectPerPodMetricReplicas(t *testing.T) {
+	const (
+		defaultNamespace = "default"
+		cpuInitializationPeriod = 5 * time.Minute
+		delayOfInitialReadinessStatus = 30 * time.Second
+		defaultTolerance = 0.1
+	)
+
+	defaultObjectRef := &autoscalingv2.CrossVersionObjectReference{Kind: "Service", Name: "my-svc"}
+
+	testCases := []struct {
+		name string
+		statusReplicas int32
+		targetAverageUsage int64
+		metricName string
+		namespace string
+		objectRef *autoscalingv2.CrossVersionObjectReference
+		metricSelector labels.Selector
+		objectMetric int64
+		calibration float64
+		tolerance float64
+		expectedReplicas int32
+		expectedUsage int64
+		expectError bool
+	}{
+		{
+			name: "Scale up based on per-pod object metrics",
+			statusReplicas: 2,
+			targetAverageUsage: 10,
+			metricName: "requests_per_pod",
+			namespace: defaultNamespace,
+			objectRef: defaultObjectRef,
+			metricSelector: labels.Everything(),
+			objectMetric: 30,
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 3,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Scale down based on per-pod object metrics",
+			statusReplicas: 4,
+			targetAverageUsage: 20,
+			metricName: "requests_per_pod",
+			namespace: defaultNamespace,
+			objectRef: defaultObjectRef,
+			metricSelector: labels.Everything(),
+			objectMetric: 60,
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 3,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "No change due to tolerance",
+			statusReplicas: 3,
+			targetAverageUsage: 10,
+			metricName: "requests_per_pod",
+			namespace: defaultNamespace,
+			objectRef: defaultObjectRef,
+			metricSelector: labels.Everything(),
+			objectMetric: 32, // Just within tolerance (10% of 30)
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 3,
+			expectedUsage: 11,
+			expectError: false,
+		},
+		{
+			name: "Scale with calibration",
+			statusReplicas: 2,
+			targetAverageUsage: 10,
+			metricName: "requests_per_pod",
+			namespace: defaultNamespace,
+			objectRef: defaultObjectRef,
+			metricSelector: labels.Everything(),
+			objectMetric: 30,
+			calibration: 0.5,
+			tolerance: defaultTolerance,
+			expectedReplicas: 12,
+			expectedUsage: 15,
+			expectError: false,
+		},
+		{
+			name: "Error getting metric",
+			statusReplicas: 2,
+			targetAverageUsage: 10,
+			metricName: "requests_per_pod",
+			namespace: defaultNamespace,
+			objectRef: defaultObjectRef,
+			metricSelector: labels.Everything(),
+			objectMetric: 0,
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectError: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(
+				mockClient,
+				tc.tolerance,
+				cpuInitializationPeriod,
+				delayOfInitialReadinessStatus,
+			)
+
+			if tc.expectError {
+				mockClient.On("GetObjectMetric", tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector).
+					Return(int64(0), time.Time{}, fmt.Errorf("metric not available")).Once()
+			} else {
+				mockClient.On("GetObjectMetric", tc.metricName, tc.namespace, tc.objectRef, tc.metricSelector).
+					Return(tc.objectMetric, time.Now(), nil).Once()
+			}
+
+			replicas, usage, timestamp, err := calculator.GetObjectPerPodMetricReplicas(
+				tc.statusReplicas,
+				tc.targetAverageUsage,
+				tc.metricName,
+				tc.namespace,
+				tc.objectRef,
+				tc.metricSelector,
+				tc.calibration,
+			)
+
+			if tc.expectError {
+				assert.Error(t, err)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tc.expectedReplicas, replicas, "Unexpected replica count")
+				assert.Equal(t, tc.expectedUsage, usage, "Unexpected usage value")
+				assert.False(t, timestamp.IsZero(), "Timestamp should not be zero")
+			}
+
+			mockClient.AssertExpectations(t)
+		})
+	}
+}
+
+// TestGetUsageRatioReplicaCount checks the calculation of usage ratio-based replica count
+func TestGetUsageRatioReplicaCount(t *testing.T) {
+	const (
+		cpuInitializationPeriod = 5 * time.Minute
+		delayOfInitialReadinessStatus = 30 * time.Second
+		defaultTolerance = 0.1
+	)
+
+	testCases := []struct {
+		name string
+		currentReplicas int32
+		usageRatio float64
+		podList []*corev1.Pod
+		calibration float64
+		tolerance float64
+		expectedReplicas int32
+		expectError bool
+	}{
+		{
+			name: "Scale up",
+			currentReplicas: 2,
+			usageRatio: 1.5,
+			podList: []*corev1.Pod{createPod("pod1", 100, 200), createPod("pod2", 100, 200)},
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 3,
+			expectError: false,
+		},
+		{
+			name: "Scale down",
+			currentReplicas: 4,
+			usageRatio: 0.5,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createPod("pod4", 100, 200),
+			},
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 2,
+			expectError: false,
+		},
+		{
+			name: "No change due to tolerance",
+			currentReplicas: 3,
+			usageRatio: 1.05,
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+			},
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 3,
+			expectError: false,
+		},
+		{
+			name: "Scale to zero",
+			currentReplicas: 0,
+			usageRatio: 0.0,
+			podList: []*corev1.Pod{},
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 0,
+			expectError: false,
+		},
+		{
+			name: "Scale from zero",
+			currentReplicas: 0,
+			usageRatio: 1.5,
+			podList: []*corev1.Pod{},
+			calibration: 1.0,
+			tolerance: defaultTolerance,
+			expectedReplicas: 2,
+			expectError: false,
+		},
+		{
+			name: "Scale with calibration",
+			currentReplicas: 2,
+			usageRatio: 1.5,
+			podList: []*corev1.Pod{createPod("pod1", 100, 200), createPod("pod2", 100, 200)},
+			calibration: 0.5,
+			tolerance: defaultTolerance,
+			expectedReplicas: 6,
+			expectError: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			mockClient := &MockQueryClient{}
+			calculator := NewReplicaCalculator(
+				mockClient,
+				tc.tolerance,
+				cpuInitializationPeriod,
+				delayOfInitialReadinessStatus,
+			)
+
+			replicas, timestamp, err := calculator.getUsageRatioReplicaCount(
+				tc.currentReplicas,
+				tc.usageRatio,
+				tc.podList,
+				tc.calibration,
+			)
+
+			assert.NoError(t, err, "Unexpected error: %v", err)
+			assert.Equal(t, tc.expectedReplicas, replicas, "Unexpected replica count")
+			assert.True(t, timestamp.IsZero(), "Expected zero timestamp, but got: %v", timestamp)
+		})
+	}
+}
+
+// TestGetReadyPodsCount verifies the counting of ready pods
+func TestGetReadyPodsCount(t *testing.T) {
+	testCases := []struct {
+		name string
+		podList []*corev1.Pod
+		expectedCount int64
+		expectError bool
+	}{
+		{
+			name: "All pods ready",
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+			},
+			expectedCount: 3,
+			expectError: false,
+		},
+		{
+			name: "Mixed ready and unready pods",
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createUnreadyPod("pod2", 100, 200),
+				createPod("pod3", 100, 200),
+				createUnreadyPod("pod4", 100, 200),
+			},
+			expectedCount: 2,
+			expectError: false,
+		},
+		{
+			name: "All pods unready",
+			podList: []*corev1.Pod{
+				createUnreadyPod("pod1", 100, 200),
+				createUnreadyPod("pod2", 100, 200),
+			},
+			expectedCount: 0,
+			expectError: false,
+		},
+		{
+			name: "Empty pod list",
+			podList: []*corev1.Pod{},
+			expectedCount: 0,
+			expectError: true,
+		},
+		{
+			name: "Pods with different phases",
+			podList: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPodWithPhase("pod2", 100, 200, corev1.PodPending),
+				createPodWithPhase("pod3", 100, 200, corev1.PodSucceeded),
+				createPodWithPhase("pod4", 100, 200, corev1.PodFailed),
+			},
+			expectedCount: 1,
+			expectError: false,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			calculator := &ReplicaCalculator{} // the remaining fields are not needed for this test
+
+			count, err := calculator.getReadyPodsCount(tc.podList)
+
+			if tc.expectError {
+				assert.Error(t, err, "Expected an error, but got none")
+			} else {
+				assert.NoError(t, err, "Unexpected error: %v", err)
+				assert.Equal(t, tc.expectedCount, count, "Unexpected ready pod count")
+			}
+		})
+	}
+}
+
+// TestGroupPods checks the grouping of pods based on their status and metrics
+func TestGroupPods(t *testing.T) {
+	now := time.Now()
+
+	testCases := []struct {
+		name string
+		pods []*corev1.Pod
+		metrics metricsclient.PodMetricsInfo
+		resource corev1.ResourceName
+		cpuInitializationPeriod time.Duration
+		delayOfInitialReadinessStatus time.Duration
+		expectedReadyCount int
+		expectedUnreadyPods []string
+		expectedMissingPods []string
+		expectedIgnoredPods []string
+	}{
+		{
+			name: "All pods ready and with metrics",
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+				"pod2": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceCPU,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 2,
+		},
+		{
+			name: "One pod unready (Pending)",
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPodWithPhase("pod2", 100, 200, corev1.PodPending),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+				"pod2": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceCPU,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 1,
+			expectedUnreadyPods: []string{"pod2"},
+		},
+		{
+			name: "One pod missing metrics",
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceCPU,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 1,
+			expectedMissingPods: []string{"pod2"},
+		},
+		{
+			name: "One pod ignored (Failed)",
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createPodWithPhase("pod2", 100, 200, corev1.PodFailed),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+				"pod2": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceCPU,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 1,
+			expectedIgnoredPods: []string{"pod2"},
+		},
+		{
+			name: "Pod within CPU initialization period",
+			pods: []*corev1.Pod{
+				createPodWithStartTime("pod1", 100, 200, now.Add(-2*time.Minute)),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceCPU,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 1,
+			expectedUnreadyPods: []string{},
+		},
+		{
+			name: "Non-CPU resource",
+			pods: []*corev1.Pod{
+				createPod("pod1", 100, 200),
+				createUnreadyPod("pod2", 100, 200),
+			},
+			metrics: metricsclient.PodMetricsInfo{
+				"pod1": {Value: 150, Window: time.Minute, Timestamp: now},
+				"pod2": {Value: 150, Window: time.Minute, Timestamp: now},
+			},
+			resource: corev1.ResourceMemory,
+			cpuInitializationPeriod: 5 * time.Minute,
+			delayOfInitialReadinessStatus: 30 * time.Second,
+			expectedReadyCount: 2,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			readyCount, unreadyPods, missingPods, ignoredPods := groupPods(tc.pods, tc.metrics, tc.resource, tc.cpuInitializationPeriod, tc.delayOfInitialReadinessStatus)
+
+			assert.Equal(t, tc.expectedReadyCount, readyCount, "Ready pod count mismatch")
+			assertSetContains(t, unreadyPods, tc.expectedUnreadyPods, "Unready pods mismatch")
+			assertSetContains(t, missingPods, tc.expectedMissingPods, "Missing pods mismatch")
+			assertSetContains(t, ignoredPods, tc.expectedIgnoredPods, "Ignored pods mismatch")
+		})
+	}
+}
+
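+// Note: calculatePodRequests evidently returns requests in milli-units:
+// 200Mi is 209,715,200 bytes, and the expected map value 209715200000 is
+// that figure multiplied by 1000.
+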
assert.NoError(t, err) + assert.Equal(t, tc.expectedResult, result) + } + }) + } +} + +// TestRemoveMetricsForPods checks the removal of metrics for specified pods +func TestRemoveMetricsForPods(t *testing.T) { + metrics := metricsclient.PodMetricsInfo{ + "pod1": {Value: 100}, + "pod2": {Value: 200}, + "pod3": {Value: 300}, + } + + podsToRemove := sets.New("pod1", "pod3") + + removeMetricsForPods(metrics, podsToRemove) + + assert.Equal(t, 1, len(metrics)) + assert.Contains(t, metrics, "pod2") + assert.NotContains(t, metrics, "pod1") + assert.NotContains(t, metrics, "pod3") +} + +// Helper Functions + +// Helper function to create a pod +func createPod(name string, request, limit int64) *corev1.Pod { + return &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + "test": "true", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "test-container", + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(request, resource.DecimalSI), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewMilliQuantity(limit, resource.DecimalSI), + }, + }, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + StartTime: &metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + } +} + +// Helper function to create a pod with a specific phase +func createPodWithPhase(name string, request, limit int64, phase corev1.PodPhase) *corev1.Pod { + pod := createPod(name, request, limit) + pod.Status.Phase = phase + return pod +} + +// Helper function to assert that a set contains expected elements +func assertSetContains(t *testing.T, set sets.Set[string], expected []string, message string) { + assert.Equal(t, len(expected), set.Len(), message) + for _, item := range expected { + assert.True(t, set.Has(item), fmt.Sprintf("%s: %s not found", message, item)) + } +} + +// Helper function to create a pod with a specific start time +func createPodWithStartTime(name string, request, limit int64, startTime time.Time) *corev1.Pod { + pod := createPod(name, request, limit) + pod.Status.StartTime = &metav1.Time{Time: startTime} + return pod +} diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller.go index 4950014c080b..051fcabb15c3 100644 --- a/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller.go +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller.go @@ -48,7 +48,7 @@ import ( ) const ( - // StatusControllerName is the controller name that will be used when reporting events. + // StatusControllerName is the controller name that will be used when reporting events and metrics. StatusControllerName = "federated-resource-quota-status-controller" ) @@ -129,6 +129,7 @@ func (c *StatusController) SetupWithManager(mgr controllerruntime.Manager) error }, }) return controllerruntime.NewControllerManagedBy(mgr). + Named(StatusControllerName). For(&policyv1alpha1.FederatedResourceQuota{}). Watches(&workv1alpha1.Work{}, handler.EnqueueRequestsFromMapFunc(fn), workPredicate). 
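// Editor's note: Named() is what makes the updated constant comment
// ("... when reporting events and metrics") hold -- controller-runtime uses this
// name to label its logger and the workqueue/reconcile metrics. Without it the
// controller name defaults to the lowercased kind of the For() type, which would
// collide here because both the status and sync controllers watch
// FederatedResourceQuota.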
Complete(c) diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go new file mode 100644 index 000000000000..7afeaedeae15 --- /dev/null +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_status_controller_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedresourcequota + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" +) + +// TestAggregatedStatusFormWorks tests the aggregatedStatusFormWorks function +func TestAggregatedStatusFormWorks(t *testing.T) { + tests := []struct { + name string + works []workv1alpha1.Work + expected []policyv1alpha1.ClusterQuotaStatus + expectedError bool + }{ + { + name: "Single work, applied successfully", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + }, + expected: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "member-cluster-1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + expectedError: false, + }, + { + name: "Work not applied", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + }, + expected: nil, + expectedError: false, + }, + { + name: "Multiple works from different clusters", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-2", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ 
+ { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + })}, + }, + }, + }, + }, + }, + expected: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "member-cluster-1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + ClusterName: "member-cluster-2", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + expectedError: false, + }, + { + name: "Work with empty ManifestStatuses", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{}, + }, + }, + }, + expected: nil, + expectedError: false, + }, + { + name: "Work with invalid JSON in ManifestStatuses", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "karmada-es-member-cluster-1", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, + }, + }, + }, + expected: nil, + expectedError: true, + }, + { + name: "Work with invalid namespace", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: "invalid-namespace", + }, + Status: workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Status: &runtime.RawExtension{Raw: mustMarshal(corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + })}, + }, + }, + }, + }, + }, + expected: nil, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := aggregatedStatusFormWorks(tt.works) + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + if tt.expected == nil { + assert.Nil(t, result) + } else { + assert.Equal(t, len(tt.expected), len(result)) + for i, expected := range tt.expected { + assert.Equal(t, expected.ClusterName, result[i].ClusterName) + assert.Equal(t, expected.ResourceQuotaStatus.Used, result[i].ResourceQuotaStatus.Used) + } + } + } + }) + } +} + +// TestCalculateUsed tests the calculateUsed function +func TestCalculateUsed(t *testing.T) { + tests := []struct { + name string + aggregatedStatuses []policyv1alpha1.ClusterQuotaStatus + expectedUsed corev1.ResourceList + }{ + { + name: "Single cluster", + aggregatedStatuses: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "cluster1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + expectedUsed: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + { + name: 
"Multiple clusters", + aggregatedStatuses: []policyv1alpha1.ClusterQuotaStatus{ + { + ClusterName: "cluster1", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + { + ClusterName: "cluster2", + ResourceQuotaStatus: corev1.ResourceQuotaStatus{ + Used: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + }, + }, + expectedUsed: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("3Gi"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := calculateUsed(tt.aggregatedStatuses) + assert.Equal(t, tt.expectedUsed.Cpu().Value(), result.Cpu().Value()) + assert.Equal(t, tt.expectedUsed.Memory().Value(), result.Memory().Value()) + }) + } +} + +// Helper function to marshal ResourceQuotaStatus to JSON +func mustMarshal(v interface{}) []byte { + b, err := json.Marshal(v) + if err != nil { + panic(err) + } + return b +} diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller.go index 5e555a58b364..59406bb596ac 100644 --- a/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller.go +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller.go @@ -44,7 +44,7 @@ import ( ) const ( - // SyncControllerName is the controller name that will be used when reporting events. + // SyncControllerName is the controller name that will be used when reporting events and metrics. SyncControllerName = "federated-resource-quota-sync-controller" ) @@ -129,6 +129,7 @@ func (c *SyncController) SetupWithManager(mgr controllerruntime.Manager) error { }) return controllerruntime.NewControllerManagedBy(mgr). + Named(SyncControllerName). For(&policyv1alpha1.FederatedResourceQuota{}). Watches(&clusterv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(fn), clusterPredicate). Complete(c) @@ -183,7 +184,7 @@ func (c *SyncController) buildWorks(ctx context.Context, quota *policyv1alpha1.F }, } - err = helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, resourceQuotaObj, nil) + err = helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, resourceQuotaObj) if err != nil { errs = append(errs, err) } diff --git a/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go new file mode 100644 index 000000000000..2029362c6bda --- /dev/null +++ b/pkg/controllers/federatedresourcequota/federated_resource_quota_sync_controller_test.go @@ -0,0 +1,294 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package federatedresourcequota + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +// setupTest initializes a test environment with the given runtime objects +// It returns a fake client and a SyncController for use in tests +func setupTest(t *testing.T, objs ...runtime.Object) (client.Client, *SyncController) { + scheme := runtime.NewScheme() + assert.NoError(t, policyv1alpha1.Install(scheme)) + assert.NoError(t, workv1alpha1.Install(scheme)) + assert.NoError(t, clusterv1alpha1.Install(scheme)) + assert.NoError(t, corev1.AddToScheme(scheme)) + + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() + controller := &SyncController{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(100), + } + return fakeClient, controller +} + +// TestCleanUpWorks tests the cleanUpWorks function of the SyncController +func TestCleanUpWorks(t *testing.T) { + tests := []struct { + name string + existingWorks []runtime.Object + namespace string + quotaName string + expectedError bool + }{ + { + name: "Successfully delete works", + existingWorks: []runtime.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work-1", + Namespace: "default", + Labels: map[string]string{ + util.FederatedResourceQuotaNamespaceLabel: "default", + util.FederatedResourceQuotaNameLabel: "test-quota", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work-2", + Namespace: "default", + Labels: map[string]string{ + util.FederatedResourceQuotaNamespaceLabel: "default", + util.FederatedResourceQuotaNameLabel: "test-quota", + }, + }, + }, + }, + namespace: "default", + quotaName: "test-quota", + expectedError: false, + }, + { + name: "No works to delete", + existingWorks: []runtime.Object{}, + namespace: "default", + quotaName: "test-quota", + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, controller := setupTest(t, tt.existingWorks...) 
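// Editor's note: a minimal pre-flight check, added for illustration only.
// It confirms the fake client was actually seeded with the works from the
// test case, so the post-cleanup emptiness assertion below is meaningful
// rather than vacuously true for the "No works to delete" case.
preList := &workv1alpha1.WorkList{}
assert.NoError(t, fakeClient.List(context.Background(), preList, client.MatchingLabels{
	util.FederatedResourceQuotaNamespaceLabel: tt.namespace,
	util.FederatedResourceQuotaNameLabel:      tt.quotaName,
}))
assert.Len(t, preList.Items, len(tt.existingWorks))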
+ + err := controller.cleanUpWorks(context.Background(), tt.namespace, tt.quotaName) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Verify that works are deleted + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList, client.MatchingLabels{ + util.FederatedResourceQuotaNamespaceLabel: tt.namespace, + util.FederatedResourceQuotaNameLabel: tt.quotaName, + }) + assert.NoError(t, err) + assert.Empty(t, workList.Items) + }) + } +} + +// TestBuildWorks tests the buildWorks function of the SyncController +// It verifies that works are correctly created for the given FederatedResourceQuota and clusters +func TestBuildWorks(t *testing.T) { + tests := []struct { + name string + quota *policyv1alpha1.FederatedResourceQuota + clusters []clusterv1alpha1.Cluster + expectedError bool + expectedWorks int + }{ + { + name: "Successfully build works for all clusters", + quota: &policyv1alpha1.FederatedResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-quota", + Namespace: "default", + }, + Spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ClusterName: "cluster2", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + }, + clusters: []clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + expectedWorks: 2, + }, + { + name: "No clusters available", + quota: &policyv1alpha1.FederatedResourceQuota{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-quota", + Namespace: "default", + }, + Spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + clusters: []clusterv1alpha1.Cluster{}, + expectedError: false, + expectedWorks: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient, controller := setupTest(t) + + err := controller.buildWorks(context.Background(), tt.quota, tt.clusters) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Verify the number of created works + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList, client.MatchingLabels{ + util.FederatedResourceQuotaNamespaceLabel: tt.quota.Namespace, + util.FederatedResourceQuotaNameLabel: tt.quota.Name, + }) + assert.NoError(t, err) + assert.Len(t, workList.Items, tt.expectedWorks) + }) + } +} + +// TestExtractClusterHardResourceList tests the extractClusterHardResourceList function +func TestExtractClusterHardResourceList(t *testing.T) { + tests := []struct { + name string + spec policyv1alpha1.FederatedResourceQuotaSpec + clusterName string + expectedResult corev1.ResourceList + }{ + { + name: "Cluster found in static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + clusterName: "cluster1", + expectedResult: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + {
name: "Cluster not found in static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + clusterName: "cluster2", + expectedResult: nil, + }, + { + name: "Empty static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{}, + }, + clusterName: "cluster1", + expectedResult: nil, + }, + { + name: "Multiple static assignments", + spec: policyv1alpha1.FederatedResourceQuotaSpec{ + StaticAssignments: []policyv1alpha1.StaticClusterAssignment{ + { + ClusterName: "cluster1", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ClusterName: "cluster2", + Hard: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + }, + }, + clusterName: "cluster2", + expectedResult: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := extractClusterHardResourceList(tt.spec, tt.clusterName) + assert.Equal(t, tt.expectedResult, result) + }) + } +} diff --git a/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller.go b/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller.go index 3fac1d851bb0..089055a3f0c7 100644 --- a/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller.go +++ b/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller.go @@ -37,7 +37,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// CRBGracefulEvictionControllerName is the controller name that will be used when reporting events. +// CRBGracefulEvictionControllerName is the controller name that will be used when reporting events and metrics. const CRBGracefulEvictionControllerName = "cluster-resource-binding-graceful-eviction-controller" // CRBGracefulEvictionController is to sync ClusterResourceBinding.spec.gracefulEvictionTasks. @@ -125,6 +125,7 @@ func (c *CRBGracefulEvictionController) SetupWithManager(mgr controllerruntime.M } return controllerruntime.NewControllerManagedBy(mgr). + Named(CRBGracefulEvictionControllerName). For(&workv1alpha2.ClusterResourceBinding{}, builder.WithPredicates(clusterResourceBindingPredicateFn)). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). Complete(c) diff --git a/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller_test.go b/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller_test.go new file mode 100644 index 000000000000..c77faa4216f6 --- /dev/null +++ b/pkg/controllers/gracefuleviction/crb_graceful_eviction_controller_test.go @@ -0,0 +1,306 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package gracefuleviction + +import ( + "context" + "encoding/json" + "math" + "testing" + "time" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" +) + +func TestCRBGracefulEvictionController_Reconcile(t *testing.T) { + scheme := runtime.NewScheme() + err := workv1alpha2.Install(scheme) + assert.NoError(t, err, "Failed to add workv1alpha2 to scheme") + now := metav1.Now() + testCases := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + expectedResult controllerruntime.Result + expectedError bool + expectedRequeue bool + notFound bool + }{ + { + name: "binding with no graceful eviction tasks", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{}, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding with active graceful eviction tasks", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &now, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + SchedulerObservedGeneration: 1, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding marked for deletion", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + DeletionTimestamp: &now, + Finalizers: []string{"test-finalizer"}, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding not found", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-binding", + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + notFound: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a fake client with or without the binding object + var client client.Client + if tc.notFound { + client = fake.NewClientBuilder().WithScheme(scheme).Build() + } else { + client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.binding).Build() + } + c := &CRBGracefulEvictionController{ + Client: client, + EventRecorder: record.NewFakeRecorder(10), + RateLimiterOptions: ratelimiterflag.Options{}, + GracefulEvictionTimeout: 5 * time.Minute, + } + result, err := c.Reconcile(context.TODO(), controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Name: tc.binding.Name, + }, + }) + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, tc.expectedResult, result) + if tc.expectedRequeue { + assert.True(t, result.RequeueAfter > 0, "Expected requeue, but got no requeue") + } else { + assert.Zero(t, result.RequeueAfter, "Expected no requeue, but got requeue") + } 
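// Editor's note: the "binding not found" case expects no error because
// reconcilers conventionally swallow NotFound (the client.IgnoreNotFound
// pattern in controller-runtime); returning an error for a deleted binding
// would only make the workqueue retry an object that no longer exists.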
+ // Verify the binding was updated, unless it's the "not found" case + if !tc.notFound { + updatedBinding := &workv1alpha2.ClusterResourceBinding{} + err = client.Get(context.TODO(), types.NamespacedName{Name: tc.binding.Name}, updatedBinding) + assert.NoError(t, err) + } + }) + } +} + +func TestCRBGracefulEvictionController_syncBinding(t *testing.T) { + now := metav1.Now() + timeout := 5 * time.Minute + + s := runtime.NewScheme() + err := workv1alpha2.Install(s) + assert.NoError(t, err, "Failed to add workv1alpha2 to scheme") + + tests := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + expectedRetryAfter time.Duration + expectedEvictionLen int + expectedError bool + }{ + { + name: "no tasks", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + }, + }, + expectedRetryAfter: 0, + expectedEvictionLen: 0, + expectedError: false, + }, + { + name: "task not expired", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &metav1.Time{Time: now.Add(-2 * time.Minute)}, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: createRawExtension("Bound"), + }, + }, + }, + }, + expectedRetryAfter: 3 * time.Minute, + expectedEvictionLen: 1, + expectedError: false, + }, + { + name: "task expired", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &metav1.Time{Time: now.Add(-6 * time.Minute)}, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: createRawExtension("Bound"), + }, + }, + }, + }, + expectedRetryAfter: 0, + expectedEvictionLen: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create a fake client with the binding object + client := fake.NewClientBuilder().WithScheme(s).WithObjects(tt.binding).Build() + c := &CRBGracefulEvictionController{ + Client: client, + EventRecorder: record.NewFakeRecorder(10), + GracefulEvictionTimeout: timeout, + } + + retryAfter, err := c.syncBinding(context.Background(), tt.binding) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.True(t, almostEqual(retryAfter, tt.expectedRetryAfter, 100*time.Millisecond), + "Expected retry after %v, but got %v", tt.expectedRetryAfter, retryAfter) + + // Check the updated binding + updatedBinding := 
&workv1alpha2.ClusterResourceBinding{} + err = client.Get(context.Background(), types.NamespacedName{Name: tt.binding.Name}, updatedBinding) + assert.NoError(t, err, "Failed to get updated binding") + + actualEvictionLen := len(updatedBinding.Spec.GracefulEvictionTasks) + assert.Equal(t, tt.expectedEvictionLen, actualEvictionLen, + "Expected %d eviction tasks, but got %d", tt.expectedEvictionLen, actualEvictionLen) + }) + } +} + +// Helper function to create a RawExtension from a status string +func createRawExtension(status string) *runtime.RawExtension { + raw, _ := json.Marshal(status) + return &runtime.RawExtension{Raw: raw} +} + +// Helper function to compare two time.Duration values with a tolerance +func almostEqual(a, b time.Duration, tolerance time.Duration) bool { + diff := a - b + return math.Abs(float64(diff)) < float64(tolerance) +} diff --git a/pkg/controllers/gracefuleviction/evictiontask_test.go b/pkg/controllers/gracefuleviction/evictiontask_test.go index d6c248c60f80..bee8d0b72f8a 100644 --- a/pkg/controllers/gracefuleviction/evictiontask_test.go +++ b/pkg/controllers/gracefuleviction/evictiontask_test.go @@ -626,6 +626,21 @@ func Test_nextRetry(t *testing.T) { }, want: 0, }, + { + name: "task with custom grace period - not expired", + args: args{ + task: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "member1", + CreationTimestamp: &metav1.Time{Time: timeNow.Add(time.Minute * -5)}, + GracePeriodSeconds: ptr.To[int32](600), + }, + }, + timeout: timeout, + timeNow: timeNow.Time, + }, + want: time.Minute * 5, // 10 minutes (grace period) - 5 minutes (elapsed time) + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller.go b/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller.go index 0c08f3dddf1f..5d26bbe2fd8d 100644 --- a/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller.go +++ b/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller.go @@ -37,7 +37,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// RBGracefulEvictionControllerName is the controller name that will be used when reporting events. +// RBGracefulEvictionControllerName is the controller name that will be used when reporting events and metrics. const RBGracefulEvictionControllerName = "resource-binding-graceful-eviction-controller" // RBGracefulEvictionController is to sync ResourceBinding.spec.gracefulEvictionTasks. @@ -125,6 +125,7 @@ func (c *RBGracefulEvictionController) SetupWithManager(mgr controllerruntime.Ma } return controllerruntime.NewControllerManagedBy(mgr). + Named(RBGracefulEvictionControllerName). For(&workv1alpha2.ResourceBinding{}, builder.WithPredicates(resourceBindingPredicateFn)). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). Complete(c) diff --git a/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller_test.go b/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller_test.go new file mode 100644 index 000000000000..41596c20566d --- /dev/null +++ b/pkg/controllers/gracefuleviction/rb_graceful_eviction_controller_test.go @@ -0,0 +1,270 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gracefuleviction + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" +) + +func TestRBGracefulEvictionController_Reconcile(t *testing.T) { + scheme := runtime.NewScheme() + err := workv1alpha2.Install(scheme) + require.NoError(t, err, "Failed to add workv1alpha2 to scheme") + + now := metav1.Now() + + testCases := []struct { + name string + binding *workv1alpha2.ResourceBinding + expectedResult controllerruntime.Result + expectedError bool + expectedRequeue bool + notFound bool + }{ + { + name: "binding with no graceful eviction tasks", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{}, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding with active graceful eviction tasks", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &now, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + SchedulerObservedGeneration: 1, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding marked for deletion", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + DeletionTimestamp: &now, + Finalizers: []string{"test-finalizer"}, + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + }, + { + name: "binding not found", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-binding", + Namespace: "default", + }, + }, + expectedResult: controllerruntime.Result{}, + expectedError: false, + expectedRequeue: false, + notFound: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a fake client with or without the binding object + var client client.Client + if tc.notFound { + client = fake.NewClientBuilder().WithScheme(scheme).Build() + } else { + client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.binding).Build() + } + + c := &RBGracefulEvictionController{ + Client: client, + EventRecorder: record.NewFakeRecorder(10), + RateLimiterOptions: ratelimiterflag.Options{}, + GracefulEvictionTimeout: 5 * time.Minute, + 
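// Editor's note: GracefulEvictionTimeout set here is the controller-wide
// default grace period; an individual task may override it via
// GracePeriodSeconds, as the "task with custom grace period" case added to
// evictiontask_test.go above exercises.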
} + + result, err := c.Reconcile(context.TODO(), controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Namespace: tc.binding.Namespace, + Name: tc.binding.Name, + }, + }) + + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tc.expectedResult, result) + + if tc.expectedRequeue { + assert.True(t, result.RequeueAfter > 0, "Expected requeue, but got no requeue") + } else { + assert.Zero(t, result.RequeueAfter, "Expected no requeue, but got requeue") + } + + // Verify the binding was updated, unless it's the "not found" case + if !tc.notFound { + updatedBinding := &workv1alpha2.ResourceBinding{} + err = client.Get(context.TODO(), types.NamespacedName{Namespace: tc.binding.Namespace, Name: tc.binding.Name}, updatedBinding) + assert.NoError(t, err) + } + }) + } +} + +func TestRBGracefulEvictionController_syncBinding(t *testing.T) { + scheme := runtime.NewScheme() + err := workv1alpha2.Install(scheme) + if err != nil { + t.Fatalf("Failed to add workv1alpha2 to scheme: %v", err) + } + + now := metav1.Now() + + testCases := []struct { + name string + binding *workv1alpha2.ResourceBinding + expectedRetryAfter time.Duration + expectedEvictionLen int + expectedError bool + }{ + { + name: "no eviction tasks", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{}, + }, + }, + expectedRetryAfter: 0, + expectedEvictionLen: 0, + expectedError: false, + }, + { + name: "active eviction task", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &now, + }, + }, + }, + }, + expectedRetryAfter: 0, + expectedEvictionLen: 0, + expectedError: false, + }, + { + name: "expired eviction task", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + CreationTimestamp: &metav1.Time{Time: now.Add(-10 * time.Minute)}, + }, + }, + }, + }, + expectedRetryAfter: 0, + expectedEvictionLen: 0, + expectedError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a fake client with the binding object + client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.binding).Build() + c := &RBGracefulEvictionController{ + Client: client, + EventRecorder: &record.FakeRecorder{}, + RateLimiterOptions: ratelimiterflag.Options{}, + GracefulEvictionTimeout: 5 * time.Minute, + } + + retryAfter, err := c.syncBinding(context.TODO(), tc.binding) + + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tc.expectedRetryAfter, retryAfter) + + // Check the updated binding + updatedBinding := &workv1alpha2.ResourceBinding{} + err = client.Get(context.TODO(), types.NamespacedName{Namespace: tc.binding.Namespace, Name: tc.binding.Name}, updatedBinding) + assert.NoError(t, err) + + assert.Equal(t, tc.expectedEvictionLen, len(updatedBinding.Spec.GracefulEvictionTasks)) + }) + } +} diff --git a/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go 
b/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go index df96a7b6eab4..0951fd9fa6ec 100644 --- a/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go +++ b/pkg/controllers/hpascaletargetmarker/hpa_scale_target_marker_controller.go @@ -29,13 +29,13 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "hpa-scale-target-marker" // scaleTargetWorkerNum is the async Worker number scaleTargetWorkerNum = 1 ) -// HpaScaleTargetMarker is to automatically add `retain-replicas` label to resource template mananged by HPA. +// HpaScaleTargetMarker is to automatically add `retain-replicas` label to resource template managed by HPA. type HpaScaleTargetMarker struct { DynamicClient dynamic.Interface RESTMapper meta.RESTMapper diff --git a/pkg/controllers/mcs/endpointslice_controller.go b/pkg/controllers/mcs/endpointslice_controller.go index a65daa57b591..445f0f49163b 100644 --- a/pkg/controllers/mcs/endpointslice_controller.go +++ b/pkg/controllers/mcs/endpointslice_controller.go @@ -40,7 +40,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/names" ) -// EndpointSliceControllerName is the controller name that will be used when reporting events. +// EndpointSliceControllerName is the controller name that will be used when reporting events and metrics. const EndpointSliceControllerName = "endpointslice-controller" // EndpointSliceController is to collect EndpointSlice which reported by member cluster from executionNamespace to serviceexport namespace. @@ -115,7 +115,10 @@ func (c *EndpointSliceController) SetupWithManager(mgr controllerruntime.Manager return false }, } - return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.Work{}, builder.WithPredicates(serviceImportPredicateFun)).Complete(c) + return controllerruntime.NewControllerManagedBy(mgr). + Named(EndpointSliceControllerName). + For(&workv1alpha1.Work{}, builder.WithPredicates(serviceImportPredicateFun)). + Complete(c) } func (c *EndpointSliceController) collectEndpointSliceFromWork(ctx context.Context, work *workv1alpha1.Work) error { diff --git a/pkg/controllers/mcs/service_export_controller.go b/pkg/controllers/mcs/service_export_controller.go index b9a03200d0c7..b1ba44a91458 100644 --- a/pkg/controllers/mcs/service_export_controller.go +++ b/pkg/controllers/mcs/service_export_controller.go @@ -55,7 +55,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/names" ) -// ServiceExportControllerName is the controller name that will be used when reporting events. +// ServiceExportControllerName is the controller name that will be used when reporting events and metrics. const ServiceExportControllerName = "service-export-controller" // ServiceExportController is to sync ServiceExport and report EndpointSlices of exported service to control-plane. @@ -131,7 +131,7 @@ func (c *ServiceExportController) Reconcile(ctx context.Context, req controllerr // SetupWithManager creates a controller and register to controller manager. 
func (c *ServiceExportController) SetupWithManager(mgr controllerruntime.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.Work{}, builder.WithPredicates(c.PredicateFunc)).Complete(c) + return controllerruntime.NewControllerManagedBy(mgr).Named(ServiceExportControllerName).For(&workv1alpha1.Work{}, builder.WithPredicates(c.PredicateFunc)).Complete(c) } // RunWorkQueue initializes worker and run it, worker will process resource asynchronously. @@ -494,7 +494,7 @@ func reportEndpointSlice(ctx context.Context, c client.Client, endpointSlice *un return err } - if err := helper.CreateOrUpdateWork(ctx, c, workMeta, endpointSlice, nil); err != nil { + if err := helper.CreateOrUpdateWork(ctx, c, workMeta, endpointSlice); err != nil { return err } diff --git a/pkg/controllers/mcs/service_import_controller.go b/pkg/controllers/mcs/service_import_controller.go index 9e7c96c9ac6e..7d8947a0119d 100644 --- a/pkg/controllers/mcs/service_import_controller.go +++ b/pkg/controllers/mcs/service_import_controller.go @@ -35,7 +35,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/names" ) -// ServiceImportControllerName is the controller name that will be used when reporting events. +// ServiceImportControllerName is the controller name that will be used when reporting events and metrics. const ServiceImportControllerName = "service-import-controller" // ServiceImportController is to sync derived service from ServiceImport. @@ -71,7 +71,10 @@ func (c *ServiceImportController) Reconcile(ctx context.Context, req controllerr // SetupWithManager creates a controller and register to controller manager. func (c *ServiceImportController) SetupWithManager(mgr controllerruntime.Manager) error { - return controllerruntime.NewControllerManagedBy(mgr).For(&mcsv1alpha1.ServiceImport{}).Complete(c) + return controllerruntime.NewControllerManagedBy(mgr). + Named(ServiceImportControllerName). + For(&mcsv1alpha1.ServiceImport{}). + Complete(c) } func (c *ServiceImportController) deleteDerivedService(ctx context.Context, svcImport types.NamespacedName) (controllerruntime.Result, error) { diff --git a/pkg/controllers/multiclusterservice/endpointslice_collect_controller.go b/pkg/controllers/multiclusterservice/endpointslice_collect_controller.go index c326723d6774..2e056910ac29 100644 --- a/pkg/controllers/multiclusterservice/endpointslice_collect_controller.go +++ b/pkg/controllers/multiclusterservice/endpointslice_collect_controller.go @@ -74,6 +74,9 @@ var ( multiClusterServiceGVK = networkingv1alpha1.SchemeGroupVersion.WithKind("MultiClusterService") ) +// EndpointSliceCollectControllerName is the controller name that will be used when reporting events and metrics. +const EndpointSliceCollectControllerName = "endpointslice-collect-controller" + // Reconcile performs a full reconciliation for the object referred to by the Request. func (c *EndpointSliceCollectController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) { klog.V(4).Infof("Reconciling Work %s", req.NamespacedName.String()) @@ -115,6 +118,7 @@ func (c *EndpointSliceCollectController) Reconcile(ctx context.Context, req cont // SetupWithManager creates a controller and register to controller manager. func (c *EndpointSliceCollectController) SetupWithManager(mgr controllerruntime.Manager) error { return controllerruntime.NewControllerManagedBy(mgr). + Named(EndpointSliceCollectControllerName). 
For(&workv1alpha1.Work{}, builder.WithPredicates(c.PredicateFunc)).Complete(c) } @@ -381,7 +385,7 @@ func reportEndpointSlice(ctx context.Context, c client.Client, endpointSlice *un return err } - if err := helper.CreateOrUpdateWork(ctx, c, workMeta, endpointSlice, nil); err != nil { + if err := helper.CreateOrUpdateWork(ctx, c, workMeta, endpointSlice); err != nil { klog.Errorf("Failed to create or update work(%s/%s), Error: %v", workMeta.Namespace, workMeta.Name, err) return err } diff --git a/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go new file mode 100644 index 000000000000..011f8e4ac4d8 --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_collect_controller_test.go @@ -0,0 +1,387 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + discoveryv1 "k8s.io/api/discovery/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestGetEventHandler(t *testing.T) { + testCases := []struct { + name string + clusterName string + existingHandler bool + }{ + { + name: "New handler", + clusterName: "cluster1", + existingHandler: false, + }, + { + name: "Existing handler", + clusterName: "cluster2", + existingHandler: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + controller := &EndpointSliceCollectController{ + eventHandlers: sync.Map{}, + worker: &mockAsyncWorker{}, + } + if tc.existingHandler { + controller.eventHandlers.Store(tc.clusterName, &mockResourceEventHandler{}) + } + handler := controller.getEventHandler(tc.clusterName) + assert.NotNil(t, handler, "Handler should not be nil") + storedHandler, exists := controller.eventHandlers.Load(tc.clusterName) + assert.True(t, exists, "Handler should be stored in eventHandlers") + assert.Equal(t, handler, storedHandler, "Stored handler should match returned handler") + if !tc.existingHandler { + assert.IsType(t, &cache.ResourceEventHandlerFuncs{}, handler, "New handler should be of type *cache.ResourceEventHandlerFuncs") + } else { + assert.IsType(t, &mockResourceEventHandler{}, handler, "Existing handler should be of type *mockResourceEventHandler") + } + }) + } +} + +func TestGenHandlerFuncs(t *testing.T) { + clusterName := "test-cluster" + testObj := createTestEndpointSlice("test-object", "test-namespace") + + t.Run("AddFunc", func(t *testing.T) 
{ + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + addFunc := controller.genHandlerAddFunc(clusterName) + addFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Add function should be called once") + }) + + t.Run("UpdateFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + updateFunc := controller.genHandlerUpdateFunc(clusterName) + newObj := createTestEndpointSlice("test-object", "test-namespace") + newObj.SetLabels(map[string]string{"new-label": "new-value"}) + + updateFunc(testObj, newObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should be called once when objects are different") + + updateFunc(testObj, testObj) + assert.Equal(t, 1, mockWorker.addCount, "Update function should not be called when objects are the same") + }) + + t.Run("DeleteFunc", func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + controller := &EndpointSliceCollectController{ + worker: mockWorker, + } + deleteFunc := controller.genHandlerDeleteFunc(clusterName) + deleteFunc(testObj) + assert.Equal(t, 1, mockWorker.addCount, "Delete function should be called once") + + deletedObj := cache.DeletedFinalStateUnknown{Obj: testObj} + deleteFunc(deletedObj) + assert.Equal(t, 2, mockWorker.addCount, "Delete function should be called for DeletedFinalStateUnknown") + }) +} + +func TestGetEndpointSliceWorkMeta(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + endpointSlice *unstructured.Unstructured + expectedMeta metav1.ObjectMeta + expectedError bool + }{ + { + name: "New work for EndpointSlice", + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: util.MultiClusterServiceKind, + }, + }, + }, + { + name: "Existing work for EndpointSlice", + existingWork: createExistingWork("endpointslice-test-eps-default", "test-cluster", "ExistingController"), + endpointSlice: createEndpointSliceForTest("test-eps", "default", "test-service", false), + expectedMeta: metav1.ObjectMeta{ + Name: "endpointslice-test-eps-default", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNamespaceLabel: "default", + util.MultiClusterServiceNameLabel: "test-service", + util.PropagationInstruction: util.PropagationInstructionSuppressed, + util.EndpointSliceWorkManagedByLabel: "ExistingController.MultiClusterService", + }, + Finalizers: []string{util.MCSEndpointSliceDispatchControllerFinalizer}, + }, + }, + { + name: "Nil EndpointSlice", + endpointSlice: nil, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + fakeClient := createFakeClient(tc.existingWork) + testFunc := func() (metav1.ObjectMeta, error) { + return getEndpointSliceWorkMeta(context.TODO(), fakeClient, "test-cluster", "endpointslice-test-eps-default", tc.endpointSlice) + } + if tc.expectedError { + assert.Panics(t, func() { + _, err := testFunc() + require.Error(t, err) + }, "Expected a panic for nil EndpointSlice") + } else { + meta, err := testFunc() + require.NoError(t, err) + assert.Equal(t, 
tc.expectedMeta.Name, meta.Name) + assert.Equal(t, tc.expectedMeta.Namespace, meta.Namespace) + assert.Equal(t, tc.expectedMeta.Finalizers, meta.Finalizers) + assert.True(t, compareLabels(meta.Labels, tc.expectedMeta.Labels), + "Labels do not match. Expected: %v, Got: %v", tc.expectedMeta.Labels, meta.Labels) + } + }) + } +} + +func TestCleanProviderClustersEndpointSliceWork(t *testing.T) { + testCases := []struct { + name string + existingWork *workv1alpha1.Work + expectedWork *workv1alpha1.Work + expectedDelete bool + }{ + { + name: "Work managed by multiple controllers", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService.OtherController", + }, + }, + }, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: "OtherController", + }, + }, + }, + expectedDelete: false, + }, + { + name: "Work managed only by MultiClusterService", + existingWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "test-cluster", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-service", + util.MultiClusterServiceNamespaceLabel: "default", + util.EndpointSliceWorkManagedByLabel: "MultiClusterService", + }, + }, + }, + expectedWork: nil, + expectedDelete: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + scheme := setupSchemeEndpointCollect() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tc.existingWork).Build() + err := cleanProviderClustersEndpointSliceWork(context.TODO(), fakeClient, tc.existingWork) + assert.NoError(t, err, "Unexpected error in cleanProviderClustersEndpointSliceWork") + + if tc.expectedDelete { + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, &workv1alpha1.Work{}) + assert.Error(t, err, "Expected Work to be deleted, but it still exists") + assert.True(t, apierrors.IsNotFound(err), "Expected NotFound error, got %v", err) + } else { + updatedWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.TODO(), types.NamespacedName{Name: tc.existingWork.Name, Namespace: tc.existingWork.Namespace}, updatedWork) + assert.NoError(t, err, "Failed to get updated Work") + assert.True(t, compareLabels(updatedWork.Labels, tc.expectedWork.Labels), + "Labels mismatch. 
Expected %v, but got %v", tc.expectedWork.Labels, updatedWork.Labels) + } + }) + } +} + +// Helper Functions + +// Helper function to set up a scheme for EndpointSlice collection tests +func setupSchemeEndpointCollect() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = workv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a test EndpointSlice +func createTestEndpointSlice(name, namespace string) *unstructured.Unstructured { + endpointSlice := &discoveryv1.EndpointSlice{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "discovery.k8s.io/v1", + Kind: "EndpointSlice", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + unstructuredObj, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(endpointSlice) + return &unstructured.Unstructured{Object: unstructuredObj} +} + +// Helper function to create an EndpointSlice for testing with specific properties +func createEndpointSliceForTest(name, namespace, serviceName string, isManaged bool) *unstructured.Unstructured { + labels := map[string]interface{}{ + discoveryv1.LabelServiceName: serviceName, + } + if isManaged { + labels[discoveryv1.LabelManagedBy] = util.EndpointSliceDispatchControllerLabelValue + } + return &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": map[string]interface{}{ + "name": name, + "namespace": namespace, + "labels": labels, + }, + }, + } +} + +// Helper function to create an existing Work resource for testing +func createExistingWork(name, namespace, managedBy string) *workv1alpha1.Work { + return &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + util.EndpointSliceWorkManagedByLabel: managedBy, + }, + }, + } +} + +// Helper function to create a fake client with an optional existing Work +func createFakeClient(existingWork *workv1alpha1.Work) client.Client { + scheme := setupSchemeEndpointCollect() + objs := []client.Object{} + if existingWork != nil { + objs = append(objs, existingWork) + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() +} + +// Helper function to compare two label maps, considering special handling for EndpointSliceWorkManagedByLabel +func compareLabels(actual, expected map[string]string) bool { + if len(actual) != len(expected) { + return false + } + for k, v := range expected { + actualV, exists := actual[k] + if !exists { + return false + } + if k == util.EndpointSliceWorkManagedByLabel { + actualParts := strings.Split(actualV, ".") + expectedParts := strings.Split(v, ".") + sort.Strings(actualParts) + sort.Strings(expectedParts) + if !reflect.DeepEqual(actualParts, expectedParts) { + return false + } + } else if actualV != v { + return false + } + } + return true +} + +// Mock implementations + +type mockAsyncWorker struct { + addCount int +} + +func (m *mockAsyncWorker) Add(_ interface{}) { + m.addCount++ +} + +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Enqueue(_ interface{}) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +type mockResourceEventHandler struct{} + +func (m *mockResourceEventHandler) OnAdd(_ interface{}, _ bool) {} + +func (m *mockResourceEventHandler) OnUpdate(_, _ interface{}) {} + +func (m *mockResourceEventHandler) OnDelete(_ interface{}) {} diff --git 
a/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller.go b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller.go index 85a1d96c88c3..cf1e958185d6 100644 --- a/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller.go +++ b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller.go @@ -49,7 +49,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/names" ) -// EndpointsliceDispatchControllerName is the controller name that will be used when reporting events. +// EndpointsliceDispatchControllerName is the controller name that will be used when reporting events and metrics. const EndpointsliceDispatchControllerName = "endpointslice-dispatch-controller" // EndpointsliceDispatchController will reconcile a MultiClusterService object @@ -160,7 +160,9 @@ func (c *EndpointsliceDispatchController) SetupWithManager(mgr controllerruntime return false }, } - return controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha1.Work{}, builder.WithPredicates(workPredicateFun)). + return controllerruntime.NewControllerManagedBy(mgr). + Named(EndpointsliceDispatchControllerName). + For(&workv1alpha1.Work{}, builder.WithPredicates(workPredicateFun)). Watches(&networkingv1alpha1.MultiClusterService{}, handler.EnqueueRequestsFromMapFunc(c.newMultiClusterServiceFunc())). Watches(&clusterv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(c.newClusterFunc())). Complete(c) @@ -393,7 +395,7 @@ func (c *EndpointsliceDispatchController) ensureEndpointSliceWork(ctx context.Co klog.Errorf("Failed to convert typed object to unstructured object, error is: %v", err) return err } - if err := helper.CreateOrUpdateWork(ctx, c.Client, workMeta, unstructuredEPS, nil); err != nil { + if err := helper.CreateOrUpdateWork(ctx, c.Client, workMeta, unstructuredEPS); err != nil { klog.Errorf("Failed to dispatch EndpointSlice %s/%s from %s to cluster %s:%v", work.GetNamespace(), work.GetName(), providerCluster, consumerCluster, err) return err diff --git a/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go new file mode 100644 index 000000000000..ec12fff7942a --- /dev/null +++ b/pkg/controllers/multiclusterservice/endpointslice_dispatch_controller_test.go @@ -0,0 +1,905 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package multiclusterservice + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + discoveryv1 "k8s.io/api/discovery/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +func TestUpdateEndpointSliceDispatched(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + status metav1.ConditionStatus + reason string + message string + expectedCondition metav1.Condition + }{ + { + name: "update status to true", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + status: metav1.ConditionTrue, + reason: "EndpointSliceDispatchedSucceed", + message: "EndpointSlice are dispatched successfully", + expectedCondition: metav1.Condition{ + Type: networkingv1alpha1.EndpointSliceDispatched, + Status: metav1.ConditionTrue, + Reason: "EndpointSliceDispatchedSucceed", + Message: "EndpointSlice are dispatched successfully", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockClient := new(MockClient) + mockStatusWriter := new(MockStatusWriter) + + // Expectations Setup + mockClient.On("Status").Return(mockStatusWriter) + mockClient.On("Get", mock.Anything, mock.AnythingOfType("types.NamespacedName"), mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). + Run(func(args mock.Arguments) { + arg := args.Get(2).(*networkingv1alpha1.MultiClusterService) + *arg = *tt.mcs // Copy the input MCS to the output + }).Return(nil) + + mockStatusWriter.On("Update", mock.Anything, mock.AnythingOfType("*v1alpha1.MultiClusterService"), mock.Anything). 
+ Run(func(args mock.Arguments) { + mcs := args.Get(1).(*networkingv1alpha1.MultiClusterService) + mcs.Status.Conditions = []metav1.Condition{tt.expectedCondition} + }).Return(nil) + + c := &EndpointsliceDispatchController{ + Client: mockClient, + EventRecorder: record.NewFakeRecorder(100), + } + + err := c.updateEndpointSliceDispatched(context.Background(), tt.mcs, tt.status, tt.reason, tt.message) + assert.NoError(t, err, "updateEndpointSliceDispatched should not return an error") + + mockClient.AssertExpectations(t) + mockStatusWriter.AssertExpectations(t) + + assert.Len(t, tt.mcs.Status.Conditions, 1, "MCS should have one condition") + if len(tt.mcs.Status.Conditions) > 0 { + condition := tt.mcs.Status.Conditions[0] + assert.Equal(t, tt.expectedCondition.Type, condition.Type) + assert.Equal(t, tt.expectedCondition.Status, condition.Status) + assert.Equal(t, tt.expectedCondition.Reason, condition.Reason) + assert.Equal(t, tt.expectedCondition.Message, condition.Message) + } + }) + } +} + +func TestNewClusterFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "new cluster, matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work"}}, + }, + }, + { + name: "new cluster, no matching MCS", + existingObjs: []client.Object{ + &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + }, + inputObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
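+ // Invoke the mapper returned by newClusterFunc directly: a Cluster event + // should translate into reconcile requests for the EndpointSlice Works of + // every MCS that lists this cluster as a consumer.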
+ result := c.newClusterFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestGetClusterEndpointSliceWorks(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcsNamespace string + mcsName string + expectedWorks int + expectedError bool + listError error + }{ + { + name: "find matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work3", + Namespace: "karmada-es-cluster3", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 2, + expectedError: false, + }, + { + name: "no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: false, + }, + { + name: "works in different namespace", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "test-namespace", + }, + }, + }, + }, + mcsNamespace: "test-namespace", + mcsName: "test-mcs", + expectedWorks: 1, + expectedError: false, + }, + { + name: "list error", + existingObjs: []client.Object{}, + mcsNamespace: "default", + mcsName: "test-mcs", + expectedWorks: 0, + expectedError: true, + listError: errors.New("fake list error"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
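+ // Optionally wrap the fake client so that List fails, simulating an + // apiserver error without modifying the code under test.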
+ if tt.listError != nil { + c.Client = &fakeClient{Client: c.Client, listError: tt.listError} + } + works, err := c.getClusterEndpointSliceWorks(context.Background(), tt.mcsNamespace, tt.mcsName) + if tt.expectedError { + assert.Error(t, err) + assert.Nil(t, works) + } else { + assert.NoError(t, err) + assert.Len(t, works, tt.expectedWorks) + for _, work := range works { + assert.Equal(t, tt.mcsName, work.Labels[util.MultiClusterServiceNameLabel]) + assert.Equal(t, tt.mcsNamespace, work.Labels[util.MultiClusterServiceNamespaceLabel]) + } + } + }) + } +} + +func TestNewMultiClusterServiceFunc(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputObj client.Object + expectedResult []reconcile.Request + }{ + { + name: "MCS with matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster2", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "karmada-es-cluster1", Name: "test-work-1"}}, + }, + }, + { + name: "MCS with no matching works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "other-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + inputObj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + expectedResult: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := setupController(tt.existingObjs...) 
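+ // Works carrying the provision-cluster annotation are dispatched copies, + // so only the original Work (test-work-1) is expected to be enqueued.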
+ result := c.newMultiClusterServiceFunc()(context.Background(), tt.inputObj) + assert.Equal(t, tt.expectedResult, result) + }) + } +} + +func TestCleanOrphanDispatchedEndpointSlice(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + mcs *networkingv1alpha1.MultiClusterService + expectedDeletes int + expectedError bool + }{ + { + name: "clean orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work2", + Namespace: "karmada-es-cluster2", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 1, + expectedError: false, + }, + { + name: "no orphan works", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster1"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + { + name: "work without provision cluster annotation", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "work1", + Namespace: "karmada-es-cluster1", + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "cluster2"}, + }, + }, + }, + expectedDeletes: 0, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(tt.existingObjs...).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + err := c.cleanOrphanDispatchedEndpointSlice(context.Background(), tt.mcs) + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + // Check if the expected number of works were deleted + remainingWorks := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), remainingWorks, &client.ListOptions{}) + assert.NoError(t, err) + 
assert.Len(t, remainingWorks.Items, len(tt.existingObjs)-tt.expectedDeletes) + } + }) + } +} + +func TestEnsureEndpointSliceWork(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + work *workv1alpha1.Work + providerCluster string + consumerCluster string + expectedError bool + expectedWork *workv1alpha1.Work + }{ + { + name: "create new work", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "test-eps" + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-consumer", + Finalizers: []string{util.ExecutionControllerFinalizer}, + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "provider", + }, + Labels: map[string]string{ + util.MultiClusterServiceNameLabel: "test-mcs", + util.MultiClusterServiceNamespaceLabel: "default", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{ + "apiVersion": "discovery.k8s.io/v1", + "kind": "EndpointSlice", + "metadata": { + "name": "provider-test-eps", + "labels": { + "kubernetes.io/service-name": "test-mcs", + "endpointslice.kubernetes.io/managed-by": "endpointslice-dispatch-controller.karmada.io", + "karmada.io/managed": "true" + }, + "annotations": { + "endpointslice.karmada.io/provision-cluster": "provider", + "work.karmada.io/name": "test-work", + "work.karmada.io/namespace": "karmada-es-consumer", + "resourcetemplate.karmada.io/uid": "" + } + }, + "endpoints": [ + { + "addresses": ["10.0.0.1"] + } + ], + "ports": [ + { + "port": 80 + } + ] + }`), + }, + }, + }, + }, + }, + }, + }, + { + name: "empty manifest", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + }, + }, + work: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-provider", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{}, + }, + }, + }, + providerCluster: "provider", + consumerCluster: "consumer", + expectedError: false, + expectedWork: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + c := &EndpointsliceDispatchController{ + Client: fakeClient, + } + + err := c.ensureEndpointSliceWork(context.Background(), tt.mcs, tt.work, tt.providerCluster, tt.consumerCluster) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + if tt.expectedWork != nil { + createdWork := &workv1alpha1.Work{} + err = fakeClient.Get(context.Background(), types.NamespacedName{ + Name: tt.expectedWork.Name, + 
Namespace: tt.expectedWork.Namespace, + }, createdWork) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWork.ObjectMeta.Name, createdWork.ObjectMeta.Name) + assert.Equal(t, tt.expectedWork.ObjectMeta.Namespace, createdWork.ObjectMeta.Namespace) + assert.Equal(t, tt.expectedWork.ObjectMeta.Finalizers, createdWork.ObjectMeta.Finalizers) + assert.Equal(t, tt.expectedWork.ObjectMeta.Annotations, createdWork.ObjectMeta.Annotations) + assert.Equal(t, tt.expectedWork.ObjectMeta.Labels, createdWork.ObjectMeta.Labels) + + // Compare the dispatched manifest against the expected one + assert.Equal(t, len(tt.expectedWork.Spec.Workload.Manifests), len(createdWork.Spec.Workload.Manifests)) + if len(tt.expectedWork.Spec.Workload.Manifests) > 0 { + expectedManifest := &unstructured.Unstructured{} + createdManifest := &unstructured.Unstructured{} + + err = expectedManifest.UnmarshalJSON(tt.expectedWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + err = createdManifest.UnmarshalJSON(createdWork.Spec.Workload.Manifests[0].Raw) + assert.NoError(t, err) + + assert.Equal(t, expectedManifest.GetName(), createdManifest.GetName()) + assert.Equal(t, expectedManifest.GetLabels(), createdManifest.GetLabels()) + assert.Equal(t, expectedManifest.GetAnnotations(), createdManifest.GetAnnotations()) + } + } else { + workList := &workv1alpha1.WorkList{} + err = fakeClient.List(context.Background(), workList) + assert.NoError(t, err) + assert.Empty(t, workList.Items) + } + } + }) + } +} + +func TestCleanupEndpointSliceFromConsumerClusters(t *testing.T) { + tests := []struct { + name string + existingObjs []client.Object + inputWork *workv1alpha1.Work + expectedErr bool + }{ + { + name: "cleanup works in consumer clusters", + existingObjs: []client.Object{ + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-1", + Namespace: "karmada-es-cluster1", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work-2", + Namespace: "karmada-es-cluster2", + Annotations: map[string]string{ + util.EndpointSliceProvisionClusterAnnotation: "cluster1", + }, + }, + }, + }, + inputWork: &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-cluster1", + Finalizers: []string{ + util.MCSEndpointSliceDispatchControllerFinalizer, + }, + }, + }, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupSchemeEndpointDispatch() + c := &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(append(tt.existingObjs, tt.inputWork)...).Build(), + } + + err := c.cleanupEndpointSliceFromConsumerClusters(context.Background(), tt.inputWork) + if tt.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Check that fetching dispatched works yields no error other than NotFound + for _, obj := range tt.existingObjs { + work := obj.(*workv1alpha1.Work) + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: work.Namespace, Name: work.Name}, &workv1alpha1.Work{}) + assert.NoError(t, client.IgnoreNotFound(err), "work %s/%s should be deleted", work.Namespace, work.Name) + } + + // Check if the finalizer is removed + updatedWork := &workv1alpha1.Work{} + err := c.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.inputWork.Namespace, Name: tt.inputWork.Name}, updatedWork) + assert.NoError(t, err) + assert.NotContains(t, updatedWork.Finalizers, util.MCSEndpointSliceDispatchControllerFinalizer) + } + }) + } +} + +// Helper Functions + 
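+// The helpers below are shared across the dispatch-controller tests: they +// register every API group the controller touches and back each test case +// with an isolated in-memory fake client.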
+// Helper function to create and configure a runtime scheme for the controller +func setupSchemeEndpointDispatch() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = networkingv1alpha1.Install(scheme) + _ = workv1alpha1.Install(scheme) + _ = clusterv1alpha1.Install(scheme) + _ = discoveryv1.AddToScheme(scheme) + return scheme +} + +// Helper function to create a new EndpointsliceDispatchController with a fake client for testing +func setupController(objs ...client.Object) *EndpointsliceDispatchController { + scheme := setupSchemeEndpointDispatch() + return &EndpointsliceDispatchController{ + Client: fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build(), + EventRecorder: record.NewFakeRecorder(100), + } +} + +// Mock implementations + +// MockClient is a mock of client.Client interface +type MockClient struct { + mock.Mock +} + +func (m *MockClient) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + args := m.Called(ctx, key, obj, opts) + return args.Error(0) +} + +func (m *MockClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + args := m.Called(ctx, list, opts) + return args.Error(0) +} + +func (m *MockClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +func (m *MockClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockClient) Status() client.StatusWriter { + args := m.Called() + return args.Get(0).(client.StatusWriter) +} + +func (m *MockClient) SubResource(subResource string) client.SubResourceClient { + args := m.Called(subResource) + return args.Get(0).(client.SubResourceClient) +} + +func (m *MockClient) Scheme() *runtime.Scheme { + args := m.Called() + return args.Get(0).(*runtime.Scheme) +} + +func (m *MockClient) RESTMapper() meta.RESTMapper { + args := m.Called() + return args.Get(0).(meta.RESTMapper) +} + +func (m *MockClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) { + args := m.Called(obj) + return args.Get(0).(schema.GroupVersionKind), args.Error(1) +} + +func (m *MockClient) IsObjectNamespaced(obj runtime.Object) (bool, error) { + args := m.Called(obj) + return args.Bool(0), args.Error(1) +} + +// MockStatusWriter is a mock of client.StatusWriter interface +type MockStatusWriter struct { + mock.Mock +} + +func (m *MockStatusWriter) Create(ctx context.Context, obj client.Object, subResource client.Object, opts ...client.SubResourceCreateOption) error { + args := m.Called(ctx, obj, subResource, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Update(ctx context.Context, obj client.Object, opts ...client.SubResourceUpdateOption) error { + args := m.Called(ctx, obj, opts) + return args.Error(0) +} + +func (m *MockStatusWriter) Patch(ctx context.Context, obj client.Object, 
patch client.Patch, opts ...client.SubResourcePatchOption) error { + args := m.Called(ctx, obj, patch, opts) + return args.Error(0) +} + +// Custom fake client that can simulate list errors +type fakeClient struct { + client.Client + listError error +} + +func (f *fakeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + if f.listError != nil { + return f.listError + } + return f.Client.List(ctx, list, opts...) +} diff --git a/pkg/controllers/multiclusterservice/mcs_controller.go b/pkg/controllers/multiclusterservice/mcs_controller.go index be70d9d653a5..7cdb5781dea7 100644 --- a/pkg/controllers/multiclusterservice/mcs_controller.go +++ b/pkg/controllers/multiclusterservice/mcs_controller.go @@ -54,7 +54,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/names" ) -// ControllerName is the controller name that will be used when reporting events. +// ControllerName is the controller name that will be used when reporting events and metrics. const ControllerName = "multiclusterservice-controller" // MCSController is to sync MultiClusterService. @@ -309,7 +309,7 @@ func (c *MCSController) propagateMultiClusterService(ctx context.Context, mcs *n klog.Errorf("Failed to convert MultiClusterService(%s/%s) to unstructured object, err is %v", mcs.Namespace, mcs.Name, err) return err } - if err = helper.CreateOrUpdateWork(ctx, c, workMeta, mcsObj, nil); err != nil { + if err = helper.CreateOrUpdateWork(ctx, c, workMeta, mcsObj); err != nil { klog.Errorf("Failed to create or update MultiClusterService(%s/%s) work in the given member cluster %s, err is %v", mcs.Namespace, mcs.Name, clusterName, err) return err @@ -600,6 +600,7 @@ func (c *MCSController) SetupWithManager(mgr controllerruntime.Manager) error { }) return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&networkingv1alpha1.MultiClusterService{}, builder.WithPredicates(mcsPredicateFunc)). Watches(&corev1.Service{}, handler.EnqueueRequestsFromMapFunc(svcMapFunc), builder.WithPredicates(svcPredicateFunc)). Watches(&clusterv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(c.clusterMapFunc())). diff --git a/pkg/controllers/multiclusterservice/mcs_controller_test.go b/pkg/controllers/multiclusterservice/mcs_controller_test.go new file mode 100644 index 000000000000..738022942574 --- /dev/null +++ b/pkg/controllers/multiclusterservice/mcs_controller_test.go @@ -0,0 +1,1103 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package multiclusterservice + +import ( + "context" + "fmt" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +func TestHandleMultiClusterServiceDelete(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingService *corev1.Service + existingResourceBinding *workv1alpha2.ResourceBinding + expectedServiceLabels map[string]string + expectedServiceAnnotations map[string]string + expectedRBLabels map[string]string + expectedRBAnnotations map[string]string + }{ + { + name: "Delete MCS and clean up Service and ResourceBinding", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Finalizers: []string{util.MCSControllerFinalizer}, + }, + }, + existingService: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + util.ResourceTemplateClaimedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + existingResourceBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "service-test-mcs", + Namespace: "default", + Labels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + Annotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + }, + expectedServiceLabels: nil, + expectedServiceAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + expectedRBLabels: map[string]string{ + workv1alpha2.BindingManagedByLabel: util.MultiClusterServiceKind, + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + expectedRBAnnotations: map[string]string{ + networkingv1alpha1.MultiClusterServiceNameAnnotation: "test-mcs", + networkingv1alpha1.MultiClusterServiceNamespaceAnnotation: "default", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.mcs, tt.existingService, tt.existingResourceBinding) + + _, err := controller.handleMultiClusterServiceDelete(context.Background(), tt.mcs) + assert.NoError(t, err) + + updatedService := &corev1.Service{} 
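+ // Re-read the Service and ResourceBinding after the delete handler runs to + // verify which claim labels and annotations were stripped and which remain.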
+ err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedService) + assert.NoError(t, err) + + updatedRB := &workv1alpha2.ResourceBinding{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: "service-" + tt.mcs.Name}, updatedRB) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedServiceLabels, updatedService.Labels) + assert.Equal(t, tt.expectedServiceAnnotations, updatedService.Annotations) + assert.Equal(t, tt.expectedRBLabels, updatedRB.Labels) + assert.Equal(t, tt.expectedRBAnnotations, updatedRB.Annotations) + + updatedMCS := &networkingv1alpha1.MultiClusterService{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.mcs.Namespace, Name: tt.mcs.Name}, updatedMCS) + assert.NoError(t, err) + assert.NotContains(t, updatedMCS.Finalizers, util.MCSControllerFinalizer) + }) + } +} + +func TestRetrieveMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + existingWorks []*workv1alpha1.Work + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Remove work for non-provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster2"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Keep work for provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + existingWorks: []*workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: names.GenerateWorkName("MultiClusterService", "test-mcs", "default"), + Namespace: names.GenerateExecutionSpaceName("cluster1"), + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`{"apiVersion":"networking.karmada.io/v1alpha1","kind":"MultiClusterService"}`)}, + }, + }, + }, + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + 
{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.existingWorks)...) + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) + + err := controller.retrieveMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + }) + } +} + +func TestPropagateMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + clusters []*clusterv1alpha1.Cluster + expectedWorks int + }{ + { + name: "Propagate to one ready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + { + name: "No propagation to unready cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "No propagation to cluster without EndpointSlice support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 0, + }, + { + name: "Propagate to multiple ready clusters", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + clusters: 
[]*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 2, + }, + { + name: "Mixed cluster readiness and API support", + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2", "cluster3"), + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "discovery.k8s.io/v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "EndpointSlice"}, + }, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + {Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}, + }, + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "v1", + Resources: []clusterv1alpha1.APIResource{ + {Kind: "Pod"}, + }, + }, + }, + }, + }, + }, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.mcs} + objs = append(objs, toRuntimeObjects(tt.clusters)...) + + controller := newFakeController(objs...) 
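+ // propagateMultiClusterService should create one Work per provider cluster + // that is Ready and advertises the discovery.k8s.io/v1 EndpointSlice API.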
+ + err := controller.propagateMultiClusterService(context.Background(), tt.mcs, tt.providerClusters) + assert.NoError(t, err) + + workList := &workv1alpha1.WorkList{} + err = controller.Client.List(context.Background(), workList) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(workList.Items)) + + if tt.expectedWorks > 0 { + for _, work := range workList.Items { + assert.Equal(t, names.GenerateWorkName(tt.mcs.Kind, tt.mcs.Name, tt.mcs.Namespace), work.Name) + clusterName, err := names.GetClusterName(work.Namespace) + assert.NoError(t, err) + assert.Contains(t, tt.providerClusters, clusterName) + assert.Equal(t, "test-id", work.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + } + } + }) + } +} + +func TestBuildResourceBinding(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + providerClusters sets.Set[string] + consumerClusters sets.Set[string] + }{ + { + name: "Build ResourceBinding with non-overlapping clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New("cluster3", "cluster4"), + }, + { + name: "Build ResourceBinding with empty consumer clusters", + svc: &corev1.Service{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Service", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + UID: "test-uid", + ResourceVersion: "1234", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + providerClusters: sets.New("cluster1", "cluster2"), + consumerClusters: sets.New[string](), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController() + rb, err := controller.buildResourceBinding(tt.svc, tt.mcs, tt.providerClusters, tt.consumerClusters) + + assert.NoError(t, err) + assert.NotNil(t, rb) + + // ObjectMeta Check + assert.Equal(t, names.GenerateBindingName(tt.svc.Kind, tt.svc.Name), rb.Name) + assert.Equal(t, tt.svc.Namespace, rb.Namespace) + + // Annotations Check + assert.Equal(t, tt.mcs.Name, rb.Annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]) + assert.Equal(t, tt.mcs.Namespace, rb.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation]) + + // Labels Check + assert.Equal(t, util.MultiClusterServiceKind, rb.Labels[workv1alpha2.BindingManagedByLabel]) + assert.Equal(t, "test-id", rb.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + + // OwnerReferences Check + assert.Len(t, rb.OwnerReferences, 1) + assert.Equal(t, tt.svc.APIVersion, rb.OwnerReferences[0].APIVersion) + assert.Equal(t, tt.svc.Kind, rb.OwnerReferences[0].Kind) + assert.Equal(t, tt.svc.Name, rb.OwnerReferences[0].Name) + assert.Equal(t, tt.svc.UID, rb.OwnerReferences[0].UID) + + // Finalizers Check + assert.Contains(t, rb.Finalizers, util.BindingControllerFinalizer) + + // Spec Check + 
expectedClusters := tt.providerClusters.Union(tt.consumerClusters).UnsortedList() + actualClusters := rb.Spec.Placement.ClusterAffinity.ClusterNames + + // Sort both slices before comparison + sort.Strings(expectedClusters) + sort.Strings(actualClusters) + + assert.Equal(t, expectedClusters, actualClusters, "Cluster names should match regardless of order") + + // Resource reference Check + assert.Equal(t, tt.svc.APIVersion, rb.Spec.Resource.APIVersion) + assert.Equal(t, tt.svc.Kind, rb.Spec.Resource.Kind) + assert.Equal(t, tt.svc.Namespace, rb.Spec.Resource.Namespace) + assert.Equal(t, tt.svc.Name, rb.Spec.Resource.Name) + assert.Equal(t, tt.svc.UID, rb.Spec.Resource.UID) + assert.Equal(t, tt.svc.ResourceVersion, rb.Spec.Resource.ResourceVersion) + }) + } +} + +func TestClaimMultiClusterServiceForService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + updateError bool + expectedError bool + }{ + { + name: "Claim service for MCS - basic case", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - with existing labels and annotations", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + Labels: map[string]string{ + "existing-label": "value", + policyv1alpha1.PropagationPolicyPermanentIDLabel: "should-be-removed", + }, + Annotations: map[string]string{ + "existing-annotation": "value", + policyv1alpha1.PropagationPolicyNameAnnotation: "should-be-removed", + }, + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + }, + { + name: "Claim service for MCS - update error", + svc: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-service", + Namespace: "default", + }, + }, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "default", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "test-id", + }, + }, + }, + updateError: true, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + controller := newFakeController(tt.svc) + if tt.updateError { + controller.Client = newFakeClientWithUpdateError(tt.svc, true) + } + + err := controller.claimMultiClusterServiceForService(context.Background(), tt.svc, tt.mcs) + + if tt.expectedError { + assert.Error(t, err) + return + } + + assert.NoError(t, err) + + updatedSvc := &corev1.Service{} + err = controller.Client.Get(context.Background(), types.NamespacedName{Namespace: tt.svc.Namespace, Name: tt.svc.Name}, updatedSvc) + assert.NoError(t, err) + + // Added labels and annotations check + assert.Equal(t, util.MultiClusterServiceKind, updatedSvc.Labels[util.ResourceTemplateClaimedByLabel]) + assert.Equal(t, "test-id", updatedSvc.Labels[networkingv1alpha1.MultiClusterServicePermanentIDLabel]) + assert.Equal(t, tt.mcs.Name, updatedSvc.Annotations[networkingv1alpha1.MultiClusterServiceNameAnnotation]) + assert.Equal(t, tt.mcs.Namespace, 
updatedSvc.Annotations[networkingv1alpha1.MultiClusterServiceNamespaceAnnotation]) + + // Removed labels and annotations check + assert.NotContains(t, updatedSvc.Labels, policyv1alpha1.PropagationPolicyPermanentIDLabel) + assert.NotContains(t, updatedSvc.Labels, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.PropagationPolicyNameAnnotation) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.PropagationPolicyNamespaceAnnotation) + assert.NotContains(t, updatedSvc.Annotations, policyv1alpha1.ClusterPropagationPolicyAnnotation) + + // Check existing labels and annotations are preserved + if tt.svc.Labels != nil { + assert.Contains(t, updatedSvc.Labels, "existing-label") + } + if tt.svc.Annotations != nil { + assert.Contains(t, updatedSvc.Annotations, "existing-annotation") + } + }) + } +} + +func TestIsClusterReady(t *testing.T) { + tests := []struct { + name string + clusterName string + clusterObj *clusterv1alpha1.Cluster + expectedReady bool + }{ + { + name: "cluster is ready", + clusterName: "ready-cluster", + clusterObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + expectedReady: true, + }, + { + name: "cluster is not ready", + clusterName: "not-ready-cluster", + clusterObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-ready-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + expectedReady: false, + }, + { + name: "cluster does not exist", + clusterName: "non-existent-cluster", + clusterObj: nil, + expectedReady: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var objects []runtime.Object + if tt.clusterObj != nil { + objects = append(objects, tt.clusterObj) + } + + controller := newFakeController(objects...) 
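+ // A cluster that cannot be fetched is treated as not ready, so the + // non-existent-cluster case expects false as well.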
+ + ready := controller.IsClusterReady(context.Background(), tt.clusterName) + assert.Equal(t, tt.expectedReady, ready, "IsClusterReady() result does not match expected") + }) + } +} + +func TestServiceHasCrossClusterMultiClusterService(t *testing.T) { + tests := []struct { + name string + svc *corev1.Service + mcs *networkingv1alpha1.MultiClusterService + expected bool + }{ + { + name: "Service has cross-cluster MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + }, + }, + expected: true, + }, + { + name: "Service has no cross-cluster MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")}, + }, + }, + expected: false, + }, + { + name: "Service has no MCS", + svc: &corev1.Service{ObjectMeta: metav1.ObjectMeta{Name: "test-svc", Namespace: "default"}}, + mcs: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.svc} + if tt.mcs != nil { + objs = append(objs, tt.mcs) + } + + controller := newFakeController(objs...) + + result := controller.serviceHasCrossClusterMultiClusterService(tt.svc) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestClusterMapFunc(t *testing.T) { + tests := []struct { + name string + object client.Object + mcsList []*networkingv1alpha1.MultiClusterService + expectedRequests []reconcile.Request + }{ + { + name: "Cluster matches MCS provider", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{ + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs2", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs1"}}, + {NamespacedName: types.NamespacedName{Namespace: "default", Name: "mcs2"}}, + }, + }, + { + name: "Cluster doesn't match any MCS", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{ + { + ObjectMeta: metav1.ObjectMeta{Name: "mcs1", Namespace: "default"}, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + }, + }, + }, + expectedRequests: []reconcile.Request{ + {NamespacedName: types.NamespacedName{Namespace: 
"default", Name: "mcs1"}}, + }, + }, + { + name: "Empty MCS list", + object: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: []reconcile.Request{}, + }, + { + name: "Non-Cluster object", + object: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}}, + mcsList: []*networkingv1alpha1.MultiClusterService{}, + expectedRequests: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objs := []runtime.Object{tt.object} + objs = append(objs, toRuntimeObjects(tt.mcsList)...) + + controller := newFakeController(objs...) + mapFunc := controller.clusterMapFunc() + + requests := mapFunc(context.Background(), tt.object) + + assert.Equal(t, len(tt.expectedRequests), len(requests), "Number of requests does not match expected") + assert.ElementsMatch(t, tt.expectedRequests, requests, "Requests do not match expected") + + if _, ok := tt.object.(*clusterv1alpha1.Cluster); ok { + for _, request := range requests { + found := false + for _, mcs := range tt.mcsList { + if mcs.Name == request.Name && mcs.Namespace == request.Namespace { + found = true + break + } + } + assert.True(t, found, "Generated request does not correspond to any MCS in the list") + } + } + }) + } +} + +func TestNeedSyncMultiClusterService(t *testing.T) { + tests := []struct { + name string + mcs *networkingv1alpha1.MultiClusterService + clusterName string + expectedNeed bool + expectedErr bool + }{ + { + name: "MCS with CrossCluster type and matching provider cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS with CrossCluster type and matching consumer cluster", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster2", + expectedNeed: true, + expectedErr: false, + }, + { + name: "MCS without CrossCluster type", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("LocalCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: false, + expectedErr: false, + }, + { + name: "MCS with empty ProviderClusters and ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + }, + }, + clusterName: "cluster1", + expectedNeed: true, + expectedErr: false, + }, + { + name: "Cluster doesn't match ProviderClusters or ConsumerClusters", + mcs: &networkingv1alpha1.MultiClusterService{ + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{networkingv1alpha1.ExposureType("CrossCluster")}, + ProviderClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster1"}}, + 
ConsumerClusters: []networkingv1alpha1.ClusterSelector{{Name: "cluster2"}}, + }, + }, + clusterName: "cluster3", + expectedNeed: false, + expectedErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clusters := createClustersFromMCS(tt.mcs) + objs := append([]runtime.Object{tt.mcs}, toRuntimeObjects(clusters)...) + + controller := newFakeController(objs...) + need, err := controller.needSyncMultiClusterService(tt.mcs, tt.clusterName) + + assert.Equal(t, tt.expectedNeed, need, "Expected need %v, but got %v", tt.expectedNeed, need) + if tt.expectedErr { + assert.Error(t, err, "Expected an error, but got none") + } else { + assert.NoError(t, err, "Expected no error, but got %v", err) + } + }) + } +} + +// Helper Functions + +// Helper function to create fake Cluster objects based on the MCS spec +func createClustersFromMCS(mcs *networkingv1alpha1.MultiClusterService) []*clusterv1alpha1.Cluster { + var clusters []*clusterv1alpha1.Cluster + for _, pc := range mcs.Spec.ProviderClusters { + clusters = append(clusters, &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: pc.Name}, + }) + } + for _, cc := range mcs.Spec.ConsumerClusters { + clusters = append(clusters, &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: cc.Name}, + }) + } + return clusters +} + +// Helper function to set up a scheme with all necessary types +func setupScheme() *runtime.Scheme { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = networkingv1alpha1.Install(s) + _ = workv1alpha1.Install(s) + _ = workv1alpha2.Install(s) + _ = clusterv1alpha1.Install(s) + _ = scheme.AddToScheme(s) + return s +} + +// Helper function to create a new MCSController with a fake client +func newFakeController(objs ...runtime.Object) *MCSController { + s := setupScheme() + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(objs...).Build() + return &MCSController{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(100), + } +} + +// Helper function to convert a slice of objects to a slice of runtime.Object +func toRuntimeObjects(objs interface{}) []runtime.Object { + var result []runtime.Object + switch v := objs.(type) { + case []*workv1alpha1.Work: + for _, obj := range v { + result = append(result, obj) + } + case []*clusterv1alpha1.Cluster: + for _, obj := range v { + result = append(result, obj) + } + case []*networkingv1alpha1.MultiClusterService: + for _, obj := range v { + result = append(result, obj) + } + } + return result +} + +// Helper function to create a fake client that can simulate update errors +func newFakeClientWithUpdateError(svc *corev1.Service, shouldError bool) client.Client { + s := runtime.NewScheme() + _ = corev1.AddToScheme(s) + _ = networkingv1alpha1.Install(s) + + fakeClient := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(svc).Build() + + if shouldError { + return &errorInjectingClient{ + Client: fakeClient, + shouldError: shouldError, + } + } + + return fakeClient +} + +type errorInjectingClient struct { + client.Client + shouldError bool +} + +func (c *errorInjectingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error { + if c.shouldError { + return fmt.Errorf("simulated update error") + } + return c.Client.Update(ctx, obj, opts...) 
+} diff --git a/pkg/controllers/namespace/namespace_sync_controller.go b/pkg/controllers/namespace/namespace_sync_controller.go index 43ceeb809796..465f15c1e30b 100644 --- a/pkg/controllers/namespace/namespace_sync_controller.go +++ b/pkg/controllers/namespace/namespace_sync_controller.go @@ -48,7 +48,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "namespace-sync-controller" ) @@ -157,7 +157,7 @@ func (c *Controller) buildWorks(ctx context.Context, namespace *corev1.Namespace Annotations: annotations, } - if err = helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, clonedNamespaced, nil); err != nil { + if err = helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, clonedNamespaced); err != nil { ch <- fmt.Errorf("sync namespace(%s) to cluster(%s) failed due to: %v", clonedNamespaced.GetName(), cluster.GetName(), err) return } @@ -273,6 +273,7 @@ func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error { }) return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&corev1.Namespace{}). Watches(&clusterv1alpha1.Cluster{}, handler.EnqueueRequestsFromMapFunc(clusterNamespaceFn), diff --git a/pkg/controllers/namespace/namespace_sync_controller_test.go b/pkg/controllers/namespace/namespace_sync_controller_test.go index 20b6fc6f1f5d..409d5fb56854 100644 --- a/pkg/controllers/namespace/namespace_sync_controller_test.go +++ b/pkg/controllers/namespace/namespace_sync_controller_test.go @@ -19,7 +19,6 @@ package namespace import ( "context" "regexp" - "sync" "testing" "time" @@ -287,7 +286,7 @@ func TestController_buildWorks(t *testing.T) { Plaintext: []policyv1alpha1.PlaintextOverrider{ { Path: "/metadata/labels/overridden", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: apiextensionsv1.JSON{ Raw: []byte(`"true"`), }, @@ -349,10 +348,9 @@ func TestController_buildWorks(t *testing.T) { func TestController_SetupWithManager(t *testing.T) { tests := []struct { - name string - setupScheme func(*runtime.Scheme) - concurrentRuns int - expectError bool + name string + setupScheme func(*runtime.Scheme) + expectError bool }{ { name: "Successful setup", @@ -362,25 +360,12 @@ func TestController_SetupWithManager(t *testing.T) { _ = workv1alpha1.Install(scheme) _ = policyv1alpha1.Install(scheme) }, - concurrentRuns: 1, - expectError: false, + expectError: false, }, { - name: "Concurrent setup", - setupScheme: func(scheme *runtime.Scheme) { - _ = corev1.AddToScheme(scheme) - _ = clusterv1alpha1.Install(scheme) - _ = workv1alpha1.Install(scheme) - _ = policyv1alpha1.Install(scheme) - }, - concurrentRuns: 10, - expectError: false, - }, - { - name: "Setup with error", - setupScheme: func(_ *runtime.Scheme) {}, // Intentionally empty to trigger error - concurrentRuns: 1, - expectError: true, + name: "Setup with error", + setupScheme: func(_ *runtime.Scheme) {}, // Intentionally empty to trigger error + expectError: true, }, } @@ -395,28 +380,20 @@ func TestController_SetupWithManager(t *testing.T) { }) assert.NoError(t, err) - var wg sync.WaitGroup - for i := 0; i < tt.concurrentRuns; i++ { - wg.Add(1) - go func() { - defer wg.Done() - c := &Controller{ - Client: mgr.GetClient(), - EventRecorder: mgr.GetEventRecorderFor("test-controller"), - OverrideManager: overridemanager.New(mgr.GetClient(), mgr.GetEventRecorderFor("test-controller")), - } - err := c.SetupWithManager(mgr) - if 
tt.expectError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, c.Client, "Controller's Client should not be nil") - assert.NotNil(t, c.EventRecorder, "Controller's EventRecorder should not be nil") - assert.NotNil(t, c.OverrideManager, "Controller's OverrideManager should not be nil") - } - }() + c := &Controller{ + Client: mgr.GetClient(), + EventRecorder: mgr.GetEventRecorderFor("test-controller"), + OverrideManager: overridemanager.New(mgr.GetClient(), mgr.GetEventRecorderFor("test-controller")), + } + err = c.SetupWithManager(mgr) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.NotNil(t, c.Client, "Controller's Client should not be nil") + assert.NotNil(t, c.EventRecorder, "Controller's EventRecorder should not be nil") + assert.NotNil(t, c.OverrideManager, "Controller's OverrideManager should not be nil") } - wg.Wait() }) } } diff --git a/pkg/controllers/remediation/remedy_controller.go b/pkg/controllers/remediation/remedy_controller.go index baea6f26250b..c55ce4bd6d2e 100644 --- a/pkg/controllers/remediation/remedy_controller.go +++ b/pkg/controllers/remediation/remedy_controller.go @@ -34,7 +34,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// ControllerName is the controller name that will be used when reporting events. +// ControllerName is the controller name that will be used when reporting events and metrics. const ControllerName = "remedy-controller" // RemedyController is to sync Cluster resource, according to the cluster status diff --git a/pkg/controllers/remediation/remedy_controller_test.go b/pkg/controllers/remediation/remedy_controller_test.go new file mode 100644 index 000000000000..befcad2d4b0a --- /dev/null +++ b/pkg/controllers/remediation/remedy_controller_test.go @@ -0,0 +1,201 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package remediation + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + remedyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1" +) + +func TestReconcile(t *testing.T) { + tests := []struct { + name string + existingObjs []runtime.Object + inputCluster *clusterv1alpha1.Cluster + expectedError bool + expectedActions []string + expectedRequeue bool + }{ + { + name: "Cluster not found", + existingObjs: []runtime.Object{}, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "non-existent-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + { + name: "Cluster with no matching remedies", + existingObjs: []runtime.Object{ + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionReady, + Status: metav1.ConditionTrue, + }, + }, + }, + }, + }, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + { + name: "Cluster being deleted", + existingObjs: []runtime.Object{ + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"test-finalizer"}, + }, + }, + }, + inputCluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + }, + }, + expectedError: false, + expectedActions: nil, + expectedRequeue: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupScheme() + fakeClient := createFakeClient(scheme, tt.existingObjs...) 
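+ // Wire the pre-populated fake client into the controller so each case drives Reconcile without a real API server.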
+ controller := &RemedyController{Client: fakeClient} + + req := reconcile.Request{ + NamespacedName: types.NamespacedName{Name: tt.inputCluster.Name}, + } + result, err := controller.Reconcile(context.Background(), req) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Assert requeue result + assert.Equal(t, tt.expectedRequeue, result.Requeue, "Unexpected requeue result") + + // Assert remedy actions if expected + if tt.expectedActions != nil { + updatedCluster := &clusterv1alpha1.Cluster{} + err := fakeClient.Get(context.Background(), req.NamespacedName, updatedCluster) + assert.NoError(t, err, "Failed to get updated cluster") + assert.Equal(t, tt.expectedActions, updatedCluster.Status.RemedyActions, "Unexpected remedy actions") + } + }) + } +} + +func TestSetupWithManager(t *testing.T) { + scheme := setupScheme() + tests := []struct { + name string + controllerSetup func() *RemedyController + }{ + { + name: "setup with valid client", + controllerSetup: func() *RemedyController { + return &RemedyController{Client: createFakeClient(scheme)} + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mgr, err := setupManager(scheme) + require.NoError(t, err) + + controller := tt.controllerSetup() + err = controller.SetupWithManager(mgr) + + assert.NoError(t, err) + }) + } +} + +func TestSetupWatches(t *testing.T) { + scheme := setupScheme() + + mgr, err := setupManager(scheme) + require.NoError(t, err) + + controller := &RemedyController{Client: mgr.GetClient()} + + remedyController, err := controllerruntime.NewControllerManagedBy(mgr). + For(&clusterv1alpha1.Cluster{}). + Build(controller) + require.NoError(t, err) + + err = controller.setupWatches(remedyController, mgr) + assert.NoError(t, err) +} + +// Helper Functions + +// Helper function to set up the scheme +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = clusterv1alpha1.Install(scheme) + _ = remedyv1alpha1.Install(scheme) + return scheme +} + +// Helper function to create a fake client +func createFakeClient(scheme *runtime.Scheme, objs ...runtime.Object) client.Client { + return fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(objs...).Build() +} + +// Helper function to set up the manager +func setupManager(scheme *runtime.Scheme) (controllerruntime.Manager, error) { + return controllerruntime.NewManager(&rest.Config{}, controllerruntime.Options{Scheme: scheme}) +} diff --git a/pkg/controllers/status/cluster_status_controller.go b/pkg/controllers/status/cluster_status_controller.go index fff3bfb0953e..e81b07d5fc5f 100644 --- a/pkg/controllers/status/cluster_status_controller.go +++ b/pkg/controllers/status/cluster_status_controller.go @@ -58,7 +58,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. 
ControllerName = "cluster-status-controller" clusterReady = "ClusterReady" clusterHealthy = "cluster is healthy and ready to accept workloads" @@ -67,6 +67,10 @@ const ( clusterNotReachableReason = "ClusterNotReachable" clusterNotReachableMsg = "cluster is not reachable" statusCollectionFailed = "StatusCollectionFailed" + + apiEnablementsComplete = "Complete" + apiEnablementPartialAPIEnablements = "Partial" + apiEnablementEmptyAPIEnablements = "Empty" ) var ( @@ -167,6 +171,7 @@ func (c *ClusterStatusController) SetupWithManager(mgr controllerruntime.Manager failureThreshold: c.ClusterFailureThreshold.Duration, } return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(c.PredicateFunc)). WithOptions(controller.Options{ RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions), @@ -214,29 +219,42 @@ func (c *ClusterStatusController) syncClusterStatus(ctx context.Context, cluster // can be safely removed from current controller. c.initializeGenericInformerManagerForCluster(clusterClient) - err = c.setCurrentClusterStatus(clusterClient, cluster, &currentClusterStatus) + var conditions []metav1.Condition + conditions, err = c.setCurrentClusterStatus(clusterClient, cluster, &currentClusterStatus) if err != nil { return err } + conditions = append(conditions, *readyCondition) + return c.updateStatusIfNeeded(ctx, cluster, currentClusterStatus, conditions...) } return c.updateStatusIfNeeded(ctx, cluster, currentClusterStatus, *readyCondition) } -func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.ClusterClient, cluster *clusterv1alpha1.Cluster, currentClusterStatus *clusterv1alpha1.ClusterStatus) error { +func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.ClusterClient, cluster *clusterv1alpha1.Cluster, currentClusterStatus *clusterv1alpha1.ClusterStatus) ([]metav1.Condition, error) { + var conditions []metav1.Condition clusterVersion, err := getKubernetesVersion(clusterClient) if err != nil { klog.Errorf("Failed to get Kubernetes version for Cluster %s. Error: %v.", cluster.GetName(), err) } currentClusterStatus.KubernetesVersion = clusterVersion + var apiEnablementCondition metav1.Condition // get the list of APIs installed in the member cluster apiEnables, err := getAPIEnablements(clusterClient) if len(apiEnables) == 0 { + apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements, + apiEnablementEmptyAPIEnablements, "collected empty APIEnablements from the cluster", metav1.ConditionFalse) klog.Errorf("Failed to get any APIs installed in Cluster %s. Error: %v.", cluster.GetName(), err) } else if err != nil { + apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements, + apiEnablementPartialAPIEnablements, fmt.Sprintf("might collect partial APIEnablements(%d) from the cluster", len(apiEnables)), metav1.ConditionFalse) klog.Warningf("Maybe get partial(%d) APIs installed in Cluster %s. 
Error: %v.", len(apiEnables), cluster.GetName(), err) + } else { + apiEnablementCondition = util.NewCondition(clusterv1alpha1.ClusterConditionCompleteAPIEnablements, + apiEnablementsComplete, "collected complete APIEnablements from the cluster", metav1.ConditionTrue) } + conditions = append(conditions, apiEnablementCondition) currentClusterStatus.APIEnablements = apiEnables if c.EnableClusterResourceModeling { @@ -246,7 +264,7 @@ func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.Cl klog.Errorf("Failed to get or create informer for Cluster %s. Error: %v.", cluster.GetName(), err) // in large-scale clusters, the timeout may occur. // if clusterInformerManager fails to be built, should be returned, otherwise, it may cause a nil pointer - return err + return nil, err } nodes, err := listNodes(clusterInformerManager) if err != nil { @@ -264,7 +282,7 @@ func (c *ClusterStatusController) setCurrentClusterStatus(clusterClient *util.Cl currentClusterStatus.ResourceSummary.AllocatableModelings = getAllocatableModelings(cluster, nodes, pods) } } - return nil + return conditions, nil } func setStatusCollectionFailedCondition(ctx context.Context, c client.Client, cluster *clusterv1alpha1.Cluster, message string) error { @@ -434,9 +452,9 @@ func getClusterHealthStatus(clusterClient *util.ClusterClient) (online, healthy return true, true } -func healthEndpointCheck(client *clientset.Clientset, path string) (int, error) { +func healthEndpointCheck(client clientset.Interface, path string) (int, error) { var healthStatus int - resp := client.DiscoveryClient.RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&healthStatus) + resp := client.Discovery().RESTClient().Get().AbsPath(path).Do(context.TODO()).StatusCode(&healthStatus) return healthStatus, resp.Error() } diff --git a/pkg/controllers/status/crb_status_controller.go b/pkg/controllers/status/crb_status_controller.go index 79a11fb76cbc..e546150eb058 100644 --- a/pkg/controllers/status/crb_status_controller.go +++ b/pkg/controllers/status/crb_status_controller.go @@ -39,7 +39,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// CRBStatusControllerName is the controller name that will be used when reporting events. +// CRBStatusControllerName is the controller name that will be used when reporting events and metrics. const CRBStatusControllerName = "cluster-resource-binding-status-controller" // CRBStatusController is to sync status of ClusterResourceBinding @@ -101,7 +101,8 @@ func (c *CRBStatusController) SetupWithManager(mgr controllerruntime.Manager) er return requests }) - return controllerruntime.NewControllerManagedBy(mgr).Named("clusterResourceBinding_status_controller"). + return controllerruntime.NewControllerManagedBy(mgr). + Named(CRBStatusControllerName). For(&workv1alpha2.ClusterResourceBinding{}, bindingPredicateFn). Watches(&workv1alpha1.Work{}, handler.EnqueueRequestsFromMapFunc(workMapFunc), workPredicateFn). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). 
diff --git a/pkg/controllers/status/crb_status_controller_test.go b/pkg/controllers/status/crb_status_controller_test.go index 9c44efa45364..1bf0b33dfa4e 100644 --- a/pkg/controllers/status/crb_status_controller_test.go +++ b/pkg/controllers/status/crb_status_controller_test.go @@ -43,9 +43,9 @@ func generateCRBStatusController() *CRBStatusController { stopCh := make(chan struct{}) defer close(stopCh) dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, - &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}}) + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "ns1"}}) m := genericmanager.NewSingleClusterInformerManager(dynamicClient, 0, stopCh) - m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Lister(corev1.SchemeGroupVersion.WithResource("namespaces")) m.Start() m.WaitForCacheSync() @@ -55,7 +55,7 @@ func generateCRBStatusController() *CRBStatusController { InformerManager: m, RESTMapper: func() meta.RESTMapper { m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) - m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + m.Add(corev1.SchemeGroupVersion.WithKind("Namespace"), meta.RESTScopeNamespace) return m }(), EventRecorder: &record.FakeRecorder{}, @@ -75,15 +75,13 @@ func TestCRBStatusController_Reconcile(t *testing.T) { name: "failed in syncBindingStatus", binding: &workv1alpha2.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: "binding", - Namespace: "default", + Name: "binding", }, Spec: workv1alpha2.ResourceBindingSpec{ Resource: workv1alpha2.ObjectReference{ APIVersion: "v1", - Kind: "Pod", - Namespace: "default", - Name: "pod", + Kind: "Namespace", + Name: "ns", }, }, }, @@ -99,8 +97,7 @@ func TestCRBStatusController_Reconcile(t *testing.T) { name: "failed in syncBindingStatus", binding: &workv1alpha2.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: "binding", - Namespace: "default", + Name: "binding", // finalizers field is required when deletionTimestamp is defined, otherwise will encounter the // error: `refusing to create obj binding with metadata.deletionTimestamp but no finalizers`. 
Finalizers: []string{"test"}, @@ -109,9 +106,8 @@ func TestCRBStatusController_Reconcile(t *testing.T) { Spec: workv1alpha2.ResourceBindingSpec{ Resource: workv1alpha2.ObjectReference{ APIVersion: "v1", - Kind: "Pod", - Namespace: "default", - Name: "pod", + Kind: "Namespace", + Name: "ns", }, }, }, @@ -128,8 +124,7 @@ func TestCRBStatusController_Reconcile(t *testing.T) { // Prepare req req := controllerruntime.Request{ NamespacedName: types.NamespacedName{ - Name: "binding", - Namespace: "default", + Name: "binding", }, } @@ -151,42 +146,40 @@ func TestCRBStatusController_Reconcile(t *testing.T) { func TestCRBStatusController_syncBindingStatus(t *testing.T) { tests := []struct { - name string - resource workv1alpha2.ObjectReference - podNameInDynamicClient string - resourceExistInClient bool - expectedError bool + name string + resource workv1alpha2.ObjectReference + nsNameInDynamicClient string + resourceExistInClient bool + expectedError bool }{ { name: "failed in FetchResourceTemplate, err is NotFound", resource: workv1alpha2.ObjectReference{ APIVersion: "v1", - Kind: "Pod", - Namespace: "default", - Name: "pod", + Kind: "Namespace", + Name: "ns", }, - podNameInDynamicClient: "pod1", - resourceExistInClient: true, - expectedError: false, + nsNameInDynamicClient: "ns1", + resourceExistInClient: true, + expectedError: false, }, { - name: "failed in FetchResourceTemplate, err is not NotFound", - resource: workv1alpha2.ObjectReference{}, - podNameInDynamicClient: "pod", - resourceExistInClient: true, - expectedError: true, + name: "failed in FetchResourceTemplate, err is not NotFound", + resource: workv1alpha2.ObjectReference{}, + nsNameInDynamicClient: "ns", + resourceExistInClient: true, + expectedError: true, }, { name: "failed in AggregateClusterResourceBindingWorkStatus", resource: workv1alpha2.ObjectReference{ APIVersion: "v1", - Kind: "Pod", - Namespace: "default", - Name: "pod", + Kind: "Namespace", + Name: "ns", }, - podNameInDynamicClient: "pod", - resourceExistInClient: false, - expectedError: true, + nsNameInDynamicClient: "ns", + resourceExistInClient: false, + expectedError: true, }, } @@ -194,13 +187,12 @@ func TestCRBStatusController_syncBindingStatus(t *testing.T) { t.Run(tt.name, func(t *testing.T) { c := generateCRBStatusController() c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, - &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}}) + &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: tt.nsNameInDynamicClient}}) c.ResourceInterpreter = FakeResourceInterpreter{DefaultInterpreter: native.NewDefaultInterpreter()} binding := &workv1alpha2.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: "binding", - Namespace: "default", + Name: "binding", }, Spec: workv1alpha2.ResourceBindingSpec{ Resource: tt.resource, diff --git a/pkg/controllers/status/rb_status_controller.go b/pkg/controllers/status/rb_status_controller.go index d5f1b8989732..e1891d8bd7ed 100644 --- a/pkg/controllers/status/rb_status_controller.go +++ b/pkg/controllers/status/rb_status_controller.go @@ -39,7 +39,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/helper" ) -// RBStatusControllerName is the controller name that will be used when reporting events. +// RBStatusControllerName is the controller name that will be used when reporting events and metrics. 
const RBStatusControllerName = "resource-binding-status-controller" // RBStatusController is to sync status of ResourceBinding @@ -103,7 +103,8 @@ func (c *RBStatusController) SetupWithManager(mgr controllerruntime.Manager) err return requests }) - return controllerruntime.NewControllerManagedBy(mgr).Named("resourceBinding_status_controller"). + return controllerruntime.NewControllerManagedBy(mgr). + Named(RBStatusControllerName). For(&workv1alpha2.ResourceBinding{}, bindingPredicateFn). Watches(&workv1alpha1.Work{}, handler.EnqueueRequestsFromMapFunc(workMapFunc), workPredicateFn). WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions)}). diff --git a/pkg/controllers/status/work_status_controller.go b/pkg/controllers/status/work_status_controller.go index 90a21237ead9..7da9ddec580d 100644 --- a/pkg/controllers/status/work_status_controller.go +++ b/pkg/controllers/status/work_status_controller.go @@ -38,6 +38,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" "github.com/karmada-io/karmada/pkg/events" @@ -54,7 +55,7 @@ import ( "github.com/karmada-io/karmada/pkg/util/restmapper" ) -// WorkStatusControllerName is the controller name that will be used when reporting events. +// WorkStatusControllerName is the controller name that will be used when reporting events and metrics. const WorkStatusControllerName = "work-status-controller" // WorkStatusController is to sync status of Work. @@ -169,7 +170,7 @@ func generateKey(obj interface{}) (util.QueueKey, error) { func getClusterNameFromAnnotation(resource *unstructured.Unstructured) (string, error) { workNamespace, exist := resource.GetAnnotations()[workv1alpha2.WorkNamespaceAnnotation] if !exist { - klog.V(4).Infof("Ignore resource(kind=%s, %s/%s) which is not managed by Karmada.", resource.GetKind(), resource.GetNamespace(), resource.GetName()) + klog.V(5).Infof("Ignore resource(kind=%s, %s/%s) which is not managed by Karmada.", resource.GetKind(), resource.GetNamespace(), resource.GetName()) return "", nil } @@ -365,20 +366,7 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1al } c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonReflectStatusSucceed, "Reflect status for object(%s/%s/%s) succeed.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) - var resourceHealth workv1alpha1.ResourceHealth - // When an unregistered resource kind is requested with the ResourceInterpreter, - // the interpreter will return an error, we treat its health status as Unknown. 
- healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj) - if err != nil { - resourceHealth = workv1alpha1.ResourceUnknown - c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error()) - } else if healthy { - resourceHealth = workv1alpha1.ResourceHealthy - c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) - } else { - resourceHealth = workv1alpha1.ResourceUnhealthy - c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) - } + resourceHealth := c.interpretHealth(clusterObj, work) identifier, err := c.buildStatusIdentifier(work, clusterObj) if err != nil { @@ -400,6 +388,28 @@ func (c *WorkStatusController) reflectStatus(ctx context.Context, work *workv1al }) } +func (c *WorkStatusController) interpretHealth(clusterObj *unstructured.Unstructured, work *workv1alpha1.Work) workv1alpha1.ResourceHealth { + // For kinds that don't have a health check configured, we treat them as healthy. + if !c.ResourceInterpreter.HookEnabled(clusterObj.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretHealth) { + klog.V(5).Infof("Skip health assessment for object (%v %s/%s): health interpretation customization is missing, treating it as healthy.", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName()) + return workv1alpha1.ResourceHealthy + } + + var resourceHealth workv1alpha1.ResourceHealth + healthy, err := c.ResourceInterpreter.InterpretHealth(clusterObj) + if err != nil { + resourceHealth = workv1alpha1.ResourceUnknown + c.EventRecorder.Eventf(work, corev1.EventTypeWarning, events.EventReasonInterpretHealthFailed, "Interpret health of object(%s/%s/%s) failed, err: %s.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName(), err.Error()) + } else if healthy { + resourceHealth = workv1alpha1.ResourceHealthy + c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as healthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) + } else { + resourceHealth = workv1alpha1.ResourceUnhealthy + c.EventRecorder.Eventf(work, corev1.EventTypeNormal, events.EventReasonInterpretHealthSucceed, "Interpret health of object(%s/%s/%s) as unhealthy.", clusterObj.GetKind(), clusterObj.GetNamespace(), clusterObj.GetName()) + } + return resourceHealth +} + func (c *WorkStatusController) buildStatusIdentifier(work *workv1alpha1.Work, clusterObj *unstructured.Unstructured) (*workv1alpha1.ResourceIdentifier, error) { manifestRef := helper.ManifestReference{APIVersion: clusterObj.GetAPIVersion(), Kind: clusterObj.GetKind(), Namespace: clusterObj.GetNamespace(), Name: clusterObj.GetName()} @@ -536,6 +546,7 @@ func (c *WorkStatusController) getSingleClusterManager(cluster *clusterv1alpha1. // SetupWithManager creates a controller and register to controller manager. func (c *WorkStatusController) SetupWithManager(mgr controllerruntime.Manager) error { return controllerruntime.NewControllerManagedBy(mgr). + Named(WorkStatusControllerName). 
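+ // Named gives the controller a stable identifier, so its log entries and workqueue metrics are labeled consistently.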
For(&workv1alpha1.Work{}, builder.WithPredicates(c.PredicateFunc)). WithOptions(controller.Options{ RateLimiter: ratelimiterflag.DefaultControllerRateLimiter(c.RateLimiterOptions), diff --git a/pkg/controllers/status/work_status_controller_test.go b/pkg/controllers/status/work_status_controller_test.go index 11563539d767..24d2f5f29d6b 100644 --- a/pkg/controllers/status/work_status_controller_test.go +++ b/pkg/controllers/status/work_status_controller_test.go @@ -25,22 +25,26 @@ import ( "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" "k8s.io/utils/ptr" controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/events" "github.com/karmada-io/karmada/pkg/resourceinterpreter/default/native" "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" "github.com/karmada-io/karmada/pkg/util" @@ -723,6 +727,9 @@ func newWorkStatusController(cluster *clusterv1alpha1.Cluster, dynamicClientSets m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) return m }(), + ResourceInterpreter: FakeResourceInterpreter{ + DefaultInterpreter: native.NewDefaultInterpreter(), + }, } if len(dynamicClientSets) > 0 { @@ -1021,3 +1028,47 @@ func TestWorkStatusController_registerInformersAndStart(t *testing.T) { assert.NotEmpty(t, err) }) } + +func TestWorkStatusController_interpretHealth(t *testing.T) { + tests := []struct { + name string + clusterObj client.Object + expectedResourceHealth workv1alpha1.ResourceHealth + expectedEventReason string + }{ + { + name: "deployment without status is interpreted as unhealthy", + clusterObj: testhelper.NewDeployment("foo", "bar"), + expectedResourceHealth: workv1alpha1.ResourceUnhealthy, + expectedEventReason: events.EventReasonInterpretHealthSucceed, + }, + { + name: "cluster role without status is interpreted as healthy", + clusterObj: testhelper.NewClusterRole("foo", []rbacv1.PolicyRule{}), + expectedResourceHealth: workv1alpha1.ResourceHealthy, + }, + } + + cluster := newCluster("cluster", clusterv1alpha1.ClusterConditionReady, metav1.ConditionTrue) + c := newWorkStatusController(cluster) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + work := testhelper.NewWork(tt.clusterObj.GetName(), tt.clusterObj.GetNamespace(), string(uuid.NewUUID()), []byte{}) + obj, err := helper.ToUnstructured(tt.clusterObj) + assert.NoError(t, err) + + resourceHealth := c.interpretHealth(obj, work) + assert.Equalf(t, tt.expectedResourceHealth, resourceHealth, "expected resource health %v, got %v", tt.expectedResourceHealth, resourceHealth) + + eventRecorder := c.EventRecorder.(*record.FakeRecorder) + if tt.expectedEventReason == "" { + assert.Empty(t, eventRecorder.Events, "expected no events to get recorded") + } else { + assert.Equal(t, 1, len(eventRecorder.Events)) + e := 
<-eventRecorder.Events + assert.Containsf(t, e, tt.expectedEventReason, "expected event reason %v, got %v", tt.expectedEventReason, e) + } + }) + } +} diff --git a/pkg/controllers/unifiedauth/unified_auth_controller.go b/pkg/controllers/unifiedauth/unified_auth_controller.go index e645da0aeb25..ff0886031611 100644 --- a/pkg/controllers/unifiedauth/unified_auth_controller.go +++ b/pkg/controllers/unifiedauth/unified_auth_controller.go @@ -44,7 +44,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "unified-auth-controller" karmadaImpersonatorName = "karmada-impersonator" @@ -237,7 +237,7 @@ func (c *Controller) buildWorks(ctx context.Context, cluster *clusterv1alpha1.Cl }, } - if err := helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, obj, nil); err != nil { + if err := helper.CreateOrUpdateWork(ctx, c.Client, objectMeta, obj); err != nil { return err } @@ -255,6 +255,7 @@ func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error { } return controllerruntime.NewControllerManagedBy(mgr). + Named(ControllerName). For(&clusterv1alpha1.Cluster{}, builder.WithPredicates(clusterPredicateFunc)). Watches(&rbacv1.ClusterRole{}, handler.EnqueueRequestsFromMapFunc(c.newClusterRoleMapFunc())). Watches(&rbacv1.ClusterRoleBinding{}, handler.EnqueueRequestsFromMapFunc(c.newClusterRoleBindingMapFunc())). diff --git a/pkg/controllers/unifiedauth/unified_auth_controller_test.go b/pkg/controllers/unifiedauth/unified_auth_controller_test.go index 7122af35c8ca..8b47dfd35ba0 100644 --- a/pkg/controllers/unifiedauth/unified_auth_controller_test.go +++ b/pkg/controllers/unifiedauth/unified_auth_controller_test.go @@ -18,18 +18,23 @@ package unifiedauth import ( "context" + "fmt" "reflect" "testing" + "github.com/stretchr/testify/assert" rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/gclient" ) @@ -122,25 +127,16 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { cluster2 := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "member2"}} cluster3 := &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "member3"}} - type fields struct { - Client client.Client - EventRecorder record.EventRecorder - } type args struct { clusterRole *rbacv1.ClusterRole } tests := []struct { - name string - fields fields - args args - want []reconcile.Request + name string + args args + want []reconcile.Request }{ { name: "specify resource names of cluster.karmada.io with cluster/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -158,10 +154,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) 
{ }, { name: "specify cluster.karmada.io with cluster/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -179,10 +171,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify cluster.karmada.io with wildcard resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "cluster-proxy"}, @@ -200,10 +188,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify search.karmada.io with proxying/proxy resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "search-proxy"}, @@ -221,10 +205,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify search.karmada.io with wildcard resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "search-proxy"}, @@ -242,10 +222,6 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }, { name: "specify wildcard apiGroups with wildcard resource", - fields: fields{ - Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), - EventRecorder: record.NewFakeRecorder(1024), - }, args: args{ clusterRole: &rbacv1.ClusterRole{ ObjectMeta: metav1.ObjectMeta{Name: "wildcard"}, @@ -265,8 +241,8 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { c := &Controller{ - Client: tt.fields.Client, - EventRecorder: tt.fields.EventRecorder, + Client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(cluster1, cluster2, cluster3).Build(), + EventRecorder: record.NewFakeRecorder(1024), } if got := c.generateRequestsFromClusterRole(context.Background(), tt.args.clusterRole); !reflect.DeepEqual(got, tt.want) { t.Errorf("generateRequestsFromClusterRole() = %v, want %v", got, tt.want) @@ -274,3 +250,455 @@ func TestController_generateRequestsFromClusterRole(t *testing.T) { }) } } + +func TestController_buildImpersonationClusterRole(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + rules []rbacv1.PolicyRule + expectedWorks int + }{ + { + name: "successful creation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{SyncMode: clusterv1alpha1.Push}, + }, + rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"impersonate"}, + APIGroups: []string{""}, + Resources: []string{"users", "groups", "serviceaccounts"}, + }, + }, + expectedWorks: 1, + }, + { + name: "cluster with no sync mode", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: 
"no-sync-cluster"}, + }, + rules: []rbacv1.PolicyRule{}, + expectedWorks: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: record.NewFakeRecorder(1024), + } + + err := c.buildImpersonationClusterRole(context.Background(), tt.cluster, tt.rules) + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(createdWorks.Items)) + }) + } +} + +func TestController_buildImpersonationClusterRoleBinding(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + expectedWorks int + expectPanic bool + }{ + { + name: "successful creation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedWorks: 1, + expectPanic: false, + }, + { + name: "cluster with no impersonator secret", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "no-secret-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{SyncMode: clusterv1alpha1.Push}, + }, + expectedWorks: 0, + expectPanic: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: record.NewFakeRecorder(1024), + } + + if tt.expectPanic { + assert.Panics(t, func() { + _ = c.buildImpersonationClusterRoleBinding(context.Background(), tt.cluster) + }, "Expected buildImpersonationClusterRoleBinding to panic, but it didn't") + } else { + err := c.buildImpersonationClusterRoleBinding(context.Background(), tt.cluster) + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, tt.expectedWorks, len(createdWorks.Items)) + } + }) + } +} + +func TestController_Reconcile(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + expectedResult reconcile.Result + expectedErrMsg string + expectedEvents []string + }{ + { + name: "successful reconciliation", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedResult: reconcile.Result{}, + expectedEvents: []string{"Normal SyncImpersonationConfigSucceed Sync impersonation config succeed."}, + }, + { + name: "cluster not found", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "non-existent-cluster"}, + }, + expectedResult: reconcile.Result{}, + }, + { + name: "cluster being deleted", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleting-cluster", + DeletionTimestamp: &metav1.Time{Time: metav1.Now().Time}, + Finalizers: []string{"test-finalizer"}, + }, + }, 
+ expectedResult: reconcile.Result{}, + }, + { + name: "cluster without impersonator secret", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "no-secret-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + }, + }, + expectedResult: reconcile.Result{}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + fakeRecorder := record.NewFakeRecorder(10) + c := &Controller{ + Client: fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster).Build(), + EventRecorder: fakeRecorder, + } + + result, err := c.Reconcile(context.Background(), reconcile.Request{ + NamespacedName: types.NamespacedName{Name: tt.cluster.Name}, + }) + + assert.Equal(t, tt.expectedResult, result) + + if tt.expectedErrMsg != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } else { + assert.NoError(t, err) + } + + close(fakeRecorder.Events) + var actualEvents []string + for event := range fakeRecorder.Events { + actualEvents = append(actualEvents, event) + } + assert.Equal(t, tt.expectedEvents, actualEvents) + }) + } +} + +func TestController_syncImpersonationConfig(t *testing.T) { + tests := []struct { + name string + cluster *clusterv1alpha1.Cluster + existingRBAC []client.Object + expectedErrMsg string + expectedWorks int + }{ + { + name: "successful sync", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + existingRBAC: []client.Object{ + &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + Verbs: []string{"*"}, + APIGroups: []string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"test-cluster"}, + }, + }, + }, + &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "test-binding"}, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: "test-role", + }, + Subjects: []rbacv1.Subject{ + {Kind: "User", Name: "test-user"}, + }, + }, + }, + expectedWorks: 2, // One for ClusterRole, one for ClusterRoleBinding + }, + { + name: "no matching RBAC", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + Spec: clusterv1alpha1.ClusterSpec{ + SyncMode: clusterv1alpha1.Push, + ImpersonatorSecretRef: &clusterv1alpha1.LocalSecretReference{ + Namespace: "karmada-system", + Name: "test-secret", + }, + }, + }, + expectedWorks: 2, // One for ClusterRole, one for ClusterRoleBinding + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := setupTestScheme() + + builder := fake.NewClientBuilder().WithScheme(s).WithObjects(tt.cluster) + for _, obj := range tt.existingRBAC { + builder = builder.WithObjects(obj) + } + + c := &Controller{ + Client: builder.Build(), + EventRecorder: record.NewFakeRecorder(10), + } + + err := c.syncImpersonationConfig(context.Background(), tt.cluster) + + if tt.expectedErrMsg != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } else { + assert.NoError(t, err) + + var createdWorks workv1alpha1.WorkList + err = c.Client.List(context.Background(), &createdWorks, &client.ListOptions{ + Namespace: generateExecutionSpaceName(tt.cluster.Name), + }) + assert.NoError(t, err) + assert.Equal(t, 
tt.expectedWorks, len(createdWorks.Items)) + } + }) + } +} + +func TestController_newClusterRoleMapFunc(t *testing.T) { + tests := []struct { + name string + clusterRole *rbacv1.ClusterRole + expectedLength int + }{ + { + name: "ClusterRole with matching cluster.karmada.io rule", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + expectedLength: 2, + }, + { + name: "ClusterRole with matching search.karmada.io rule", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "search-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"search.karmada.io"}, + Resources: []string{"proxying/proxy"}, + }, + }, + }, + expectedLength: 1, // 1 because we're not actually listing clusters in this test + }, + { + name: "ClusterRole without matching rules", + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + expectedLength: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(&clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}). + Build() + + c := &Controller{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(1024), + } + + mapFunc := c.newClusterRoleMapFunc() + result := mapFunc(context.Background(), tt.clusterRole) + assert.Len(t, result, tt.expectedLength) + }) + } +} + +func TestController_newClusterRoleBindingMapFunc(t *testing.T) { + tests := []struct { + name string + clusterRoleBinding *rbacv1.ClusterRoleBinding + clusterRole *rbacv1.ClusterRole + expectedLength int + }{ + { + name: "ClusterRoleBinding with matching ClusterRole", + clusterRoleBinding: &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "test-binding"}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "test-role", + }, + }, + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "test-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"cluster.karmada.io"}, + Resources: []string{"clusters/proxy"}, + ResourceNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + expectedLength: 2, + }, + { + name: "ClusterRoleBinding with non-matching ClusterRole", + clusterRoleBinding: &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-binding"}, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: "non-matching-role", + }, + }, + clusterRole: &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: "non-matching-role"}, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + expectedLength: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithRuntimeObjects(tt.clusterRole, &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}}). 
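+ // Seed both the referenced ClusterRole and a Cluster so the map func can resolve the binding's RoleRef into per-cluster requests.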
+ Build() + + c := &Controller{ + Client: fakeClient, + EventRecorder: record.NewFakeRecorder(1024), + } + + mapFunc := c.newClusterRoleBindingMapFunc() + result := mapFunc(context.Background(), tt.clusterRoleBinding) + assert.Len(t, result, tt.expectedLength) + }) + } +} + +// Helper Functions + +// generateExecutionSpaceName is a helper function to generate the execution space name +func generateExecutionSpaceName(clusterName string) string { + return fmt.Sprintf("karmada-es-%s", clusterName) +} + +// Helper function to setup scheme +func setupTestScheme() *runtime.Scheme { + s := runtime.NewScheme() + _ = clusterv1alpha1.Install(s) + _ = workv1alpha1.Install(s) + _ = rbacv1.AddToScheme(s) + return s +} diff --git a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller.go b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller.go index 7b433a5d8379..0c6653c3c27e 100644 --- a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller.go +++ b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller.go @@ -40,7 +40,7 @@ import ( ) const ( - // ControllerName is the controller name that will be used when reporting events. + // ControllerName is the controller name that will be used when reporting events and metrics. ControllerName = "workload-rebalancer" ) diff --git a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go index 8a458bffea89..46d89b701613 100644 --- a/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go +++ b/pkg/controllers/workloadrebalancer/workloadrebalancer_controller_test.go @@ -18,11 +18,15 @@ package workloadrebalancer import ( "context" + "crypto/rand" + "fmt" + "math/big" "reflect" "testing" "time" appsv1 "k8s.io/api/apps/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -42,27 +46,27 @@ var ( now = metav1.Now() oneHourAgo = metav1.NewTime(time.Now().Add(-1 * time.Hour)) - deploy1 = helper.NewDeployment("test-ns", "test-1") + deploy1 = helper.NewDeployment("test-ns", fmt.Sprintf("test-1-%s", randomSuffix())) binding1 = newResourceBinding(deploy1) deploy1Obj = newObjectReference(deploy1) // use deploy2 to mock a resource whose resource-binding not found. - deploy2 = helper.NewDeployment("test-ns", "test-2") + deploy2 = helper.NewDeployment("test-ns", fmt.Sprintf("test-2-%s", randomSuffix())) deploy2Obj = newObjectReference(deploy2) - deploy3 = helper.NewDeployment("test-ns", "test-3") + deploy3 = helper.NewDeployment("test-ns", fmt.Sprintf("test-3-%s", randomSuffix())) binding3 = newResourceBinding(deploy3) deploy3Obj = newObjectReference(deploy3) pendingRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-pending-workloads", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-pending-workloads-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ // Put deploy2Obj before deploy1Obj to test whether the results of status are sorted. 
Workloads: []appsv1alpha1.ObjectReference{deploy2Obj, deploy1Obj}, }, } succeedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-succeed-workloads", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-succeed-workloads-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, }, @@ -76,7 +80,7 @@ var ( }, } notFoundRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-workloads-whose-binding-not-found", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-workloads-whose-binding-not-found-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy2Obj}, }, @@ -91,7 +95,7 @@ var ( }, } failedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-with-failed-workloads", CreationTimestamp: now}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-with-failed-workloads-%s", randomSuffix()), CreationTimestamp: now}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, }, @@ -105,7 +109,7 @@ var ( }, } modifiedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "rebalancer-which-experienced-modification", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("rebalancer-which-experienced-modification-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ Workloads: []appsv1alpha1.ObjectReference{deploy3Obj}, }, @@ -124,7 +128,7 @@ var ( }, } ttlFinishedRebalancer = &appsv1alpha1.WorkloadRebalancer{ - ObjectMeta: metav1.ObjectMeta{Name: "ttl-finished-rebalancer", CreationTimestamp: oneHourAgo}, + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("ttl-finished-rebalancer-%s", randomSuffix()), CreationTimestamp: oneHourAgo}, Spec: appsv1alpha1.WorkloadRebalancerSpec{ TTLSecondsAfterFinished: ptr.To[int32](5), Workloads: []appsv1alpha1.ObjectReference{deploy1Obj}, @@ -139,6 +143,19 @@ var ( }, }, } + + clusterRole = &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("test-cluster-role-%s", randomSuffix())}, + } + clusterBinding = newClusterResourceBinding(clusterRole) + clusterRoleObj = newClusterRoleObjectReference(clusterRole) + + clusterRebalancer = &appsv1alpha1.WorkloadRebalancer{ + ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("cluster-rebalancer-%s", randomSuffix()), CreationTimestamp: now}, + Spec: appsv1alpha1.WorkloadRebalancerSpec{ + Workloads: []appsv1alpha1.ObjectReference{clusterRoleObj}, + }, + } ) func TestRebalancerController_Reconcile(t *testing.T) { @@ -250,51 +267,121 @@ func TestRebalancerController_Reconcile(t *testing.T) { existObjsWithStatus: []client.Object{ttlFinishedRebalancer}, needsCleanup: true, }, + { + name: "reconcile cluster-wide resource rebalancer", + req: controllerruntime.Request{ + NamespacedName: types.NamespacedName{Name: clusterRebalancer.Name}, + }, + existObjects: []client.Object{clusterRole, clusterBinding, clusterRebalancer}, + existObjsWithStatus: []client.Object{clusterRebalancer}, + wantStatus: appsv1alpha1.WorkloadRebalancerStatus{ + ObservedWorkloads: []appsv1alpha1.ObservedWorkload{ + { + Workload: clusterRoleObj, + Result: appsv1alpha1.RebalanceSuccessful, + }, + }, + }, + }, } 
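The cluster-scoped case above leans on a convention worth spelling out: a workload reference with an empty Namespace is tracked by a cluster-scoped ClusterResourceBinding, while a namespaced workload uses a ResourceBinding, both named via names.GenerateBindingName. A minimal sketch of that lookup, assuming the appsv1alpha1, workv1alpha2, client, and names imports already present in this file (illustration only, not part of the patch):

func bindingForWorkload(w appsv1alpha1.ObjectReference) (client.Object, client.ObjectKey) {
	// GenerateBindingName derives the binding name from the workload's kind and name,
	// as used by the assertions in the tests below.
	name := names.GenerateBindingName(w.Kind, w.Name)
	if w.Namespace == "" {
		// Cluster-scoped resource, e.g. a ClusterRole: the binding itself is cluster-scoped.
		return &workv1alpha2.ClusterResourceBinding{}, client.ObjectKey{Name: name}
	}
	return &workv1alpha2.ResourceBinding{}, client.ObjectKey{Namespace: w.Namespace, Name: name}
}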
for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - c := &RebalancerController{ - Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()). - WithObjects(tt.existObjects...). - WithStatusSubresource(tt.existObjsWithStatus...).Build(), - } - _, err := c.Reconcile(context.TODO(), tt.req) - // 1. check whether it has error - if (err == nil && tt.wantErr) || (err != nil && !tt.wantErr) { - t.Fatalf("Reconcile() error = %v, wantErr %v", err, tt.wantErr) - } - // 2. check final WorkloadRebalancer status - rebalancerGet := &appsv1alpha1.WorkloadRebalancer{} - if err := c.Client.Get(context.TODO(), tt.req.NamespacedName, rebalancerGet); err != nil { - if apierrors.IsNotFound(err) && tt.needsCleanup { - t.Logf("WorkloadRebalancer %s has be cleaned up as expected", tt.req.NamespacedName) - return - } - t.Fatalf("get WorkloadRebalancer failed: %+v", err) - } - // we can't predict `FinishTime` in `wantStatus`, so not compare this field. - tt.wantStatus.FinishTime = rebalancerGet.Status.FinishTime - if !reflect.DeepEqual(rebalancerGet.Status, tt.wantStatus) { - t.Fatalf("update WorkloadRebalancer failed, got: %+v, want: %+v", rebalancerGet.Status, tt.wantStatus) - } - // 3. check binding's rescheduleTriggeredAt - for _, item := range rebalancerGet.Status.ObservedWorkloads { - if item.Result != appsv1alpha1.RebalanceSuccessful { - continue - } - bindingGet := &workv1alpha2.ResourceBinding{} - bindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) - if err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: item.Workload.Namespace, Name: bindingName}, bindingGet); err != nil { - t.Fatalf("get bindding (%s) failed: %+v", bindingName, err) - } - if !bindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { - t.Fatalf("rescheduleTriggeredAt of binding got: %+v, want: %+v", bindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) - } - } + runRebalancerTest(t, tt) }) } } +func runRebalancerTest(t *testing.T, tt struct { + name string + req controllerruntime.Request + existObjects []client.Object + existObjsWithStatus []client.Object + wantErr bool + wantStatus appsv1alpha1.WorkloadRebalancerStatus + needsCleanup bool +}) { + c := &RebalancerController{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithObjects(tt.existObjects...). + WithStatusSubresource(tt.existObjsWithStatus...).Build(), + } + _, err := c.Reconcile(context.TODO(), tt.req) + // 1. check whether it has error + if (err != nil) != tt.wantErr { + t.Fatalf("Reconcile() error = %v, wantErr %v", err, tt.wantErr) + } + + // 2. check final WorkloadRebalancer status + rebalancerGet := &appsv1alpha1.WorkloadRebalancer{} + err = c.Client.Get(context.TODO(), tt.req.NamespacedName, rebalancerGet) + if err != nil { + if apierrors.IsNotFound(err) && tt.needsCleanup { + t.Logf("WorkloadRebalancer %s has been cleaned up as expected", tt.req.NamespacedName) + return + } + t.Fatalf("get WorkloadRebalancer failed: %+v", err) + } + + tt.wantStatus.FinishTime = rebalancerGet.Status.FinishTime + if rebalancerGet.Status.FinishTime == nil { + // If FinishTime is nil, set it to a non-nil value for comparison + now := metav1.Now() + tt.wantStatus.FinishTime = &now + rebalancerGet.Status.FinishTime = &now + } + if !reflect.DeepEqual(rebalancerGet.Status, tt.wantStatus) { + t.Fatalf("update WorkloadRebalancer failed, got: %+v, want: %+v", rebalancerGet.Status, tt.wantStatus) + } + + // 3. 
check binding's rescheduleTriggeredAt + checkBindings(t, c, rebalancerGet) +} + +func checkBindings(t *testing.T, c *RebalancerController, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + for _, item := range rebalancerGet.Status.ObservedWorkloads { + if item.Result != appsv1alpha1.RebalanceSuccessful { + continue + } + if item.Workload.Namespace == "" { + // This is a cluster-wide resource + checkClusterBinding(t, c, item, rebalancerGet) + } else { + // This is a namespace-scoped resource + checkResourceBinding(t, c, item, rebalancerGet) + } + } +} + +func checkClusterBinding(t *testing.T, c *RebalancerController, item appsv1alpha1.ObservedWorkload, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + clusterBindingGet := &workv1alpha2.ClusterResourceBinding{} + clusterBindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) + err := c.Client.Get(context.TODO(), client.ObjectKey{Name: clusterBindingName}, clusterBindingGet) + if err != nil { + if !apierrors.IsNotFound(err) { + t.Fatalf("get cluster binding (%s) failed: %+v", clusterBindingName, err) + } + return // Skip the check if the binding is not found + } + if !clusterBindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { + t.Fatalf("rescheduleTriggeredAt of cluster binding got: %+v, want: %+v", clusterBindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) + } +} + +func checkResourceBinding(t *testing.T, c *RebalancerController, item appsv1alpha1.ObservedWorkload, rebalancerGet *appsv1alpha1.WorkloadRebalancer) { + bindingGet := &workv1alpha2.ResourceBinding{} + bindingName := names.GenerateBindingName(item.Workload.Kind, item.Workload.Name) + err := c.Client.Get(context.TODO(), client.ObjectKey{Namespace: item.Workload.Namespace, Name: bindingName}, bindingGet) + if err != nil { + if !apierrors.IsNotFound(err) { + t.Fatalf("get binding (%s) failed: %+v", bindingName, err) + } + return // Skip the check if the binding is not found + } + if !bindingGet.Spec.RescheduleTriggeredAt.Equal(&rebalancerGet.CreationTimestamp) { + t.Fatalf("rescheduleTriggeredAt of binding got: %+v, want: %+v", bindingGet.Spec.RescheduleTriggeredAt, rebalancerGet.CreationTimestamp) + } +} + func TestRebalancerController_updateWorkloadRebalancerStatus(t *testing.T) { tests := []struct { name string @@ -355,3 +442,31 @@ func newObjectReference(obj *appsv1.Deployment) appsv1alpha1.ObjectReference { Namespace: obj.Namespace, } } + +func newClusterResourceBinding(obj *rbacv1.ClusterRole) *workv1alpha2.ClusterResourceBinding { + return &workv1alpha2.ClusterResourceBinding{ + TypeMeta: metav1.TypeMeta{Kind: "ClusterResourceBinding", APIVersion: "work.karmada.io/v1alpha2"}, + ObjectMeta: metav1.ObjectMeta{Name: names.GenerateBindingName("ClusterRole", obj.Name)}, + Spec: workv1alpha2.ResourceBindingSpec{RescheduleTriggeredAt: &oneHourAgo}, + Status: workv1alpha2.ResourceBindingStatus{LastScheduledTime: &oneHourAgo}, + } +} + +func newClusterRoleObjectReference(obj *rbacv1.ClusterRole) appsv1alpha1.ObjectReference { + return appsv1alpha1.ObjectReference{ + APIVersion: "rbac.authorization.k8s.io/v1", + Kind: "ClusterRole", + Name: obj.Name, + } +} + +// Helper function for generating random suffix +func randomSuffix() string { + max := big.NewInt(10000) + n, err := rand.Int(rand.Reader, max) + if err != nil { + // In a test setup, it's unlikely we'll hit this error + panic(fmt.Sprintf("failed to generate random number: %v", err)) + } + return fmt.Sprintf("%d", n) +} diff --git 
a/pkg/dependenciesdistributor/dependencies_distributor.go b/pkg/dependenciesdistributor/dependencies_distributor.go
index 7dad0ba3c149..8bc351dbb939 100644
--- a/pkg/dependenciesdistributor/dependencies_distributor.go
+++ b/pkg/dependenciesdistributor/dependencies_distributor.go
@@ -61,6 +61,11 @@ import (
 	"github.com/karmada-io/karmada/pkg/util/restmapper"
 )
 
+const (
+	// ControllerName is the controller name that will be used when reporting events and metrics.
+	ControllerName = "dependencies-distributor"
+)
+
 // well-know labels
 const (
 	// dependedByLabelKeyPrefix is added to the attached binding, it is the
@@ -546,6 +551,7 @@ func (d *DependenciesDistributor) removeScheduleResultFromAttachedBindings(bindi
 		delete(attachedBindings[index].Labels, bindingLabelKey)
 		updatedSnapshot := deleteBindingFromSnapshot(bindingNamespace, bindingName, attachedBindings[index].Spec.RequiredBy)
 		attachedBindings[index].Spec.RequiredBy = updatedSnapshot
+		attachedBindings[index].Spec.PreserveResourcesOnDeletion = nil
 		if err := d.Client.Update(context.TODO(), attachedBindings[index]); err != nil {
 			klog.Errorf("Failed to update binding(%s/%s): %v", binding.Namespace, binding.Name, err)
 			errs = append(errs, err)
@@ -560,9 +566,15 @@ func (d *DependenciesDistributor) createOrUpdateAttachedBinding(attachedBinding
 	bindingKey := client.ObjectKeyFromObject(attachedBinding)
 	err := d.Client.Get(context.TODO(), bindingKey, existBinding)
 	if err == nil {
+		// If the spec.Placement is nil, this means that existBinding is generated by the dependency mechanism.
+		// If the spec.Placement is not nil, then it must be generated by PropagationPolicy.
+		if existBinding.Spec.Placement == nil {
+			existBinding.Spec.ConflictResolution = attachedBinding.Spec.ConflictResolution
+		}
 		existBinding.Spec.RequiredBy = mergeBindingSnapshot(existBinding.Spec.RequiredBy, attachedBinding.Spec.RequiredBy)
 		existBinding.Labels = util.DedupeAndMergeLabels(existBinding.Labels, attachedBinding.Labels)
 		existBinding.Spec.Resource = attachedBinding.Spec.Resource
+		existBinding.Spec.PreserveResourcesOnDeletion = attachedBinding.Spec.PreserveResourcesOnDeletion
+
 		if err := d.Client.Update(context.TODO(), existBinding); err != nil {
 			klog.Errorf("Failed to update resourceBinding(%s): %v", bindingKey, err)
@@ -615,7 +627,9 @@ func (d *DependenciesDistributor) SetupWithManager(mgr controllerruntime.Manager
 	d.genericEvent = make(chan event.TypedGenericEvent[*workv1alpha2.ResourceBinding])
 	return utilerrors.NewAggregate([]error{
 		mgr.Add(d),
-		controllerruntime.NewControllerManagedBy(mgr).For(&workv1alpha2.ResourceBinding{}).
+		controllerruntime.NewControllerManagedBy(mgr).
+			Named(ControllerName).
+			For(&workv1alpha2.ResourceBinding{}).
 			WithEventFilter(predicate.Funcs{
 				CreateFunc: func(event event.CreateEvent) bool {
 					bindingObject := event.Object.(*workv1alpha2.ResourceBinding)
@@ -703,7 +717,9 @@ func buildAttachedBinding(independentBinding *workv1alpha2.ResourceBinding, obje
 			Name:            object.GetName(),
 			ResourceVersion: object.GetResourceVersion(),
 		},
-		RequiredBy: result,
+		RequiredBy:                  result,
+		PreserveResourcesOnDeletion: independentBinding.Spec.PreserveResourcesOnDeletion,
+		ConflictResolution:          independentBinding.Spec.ConflictResolution,
 	},
 	}
 }
diff --git a/pkg/dependenciesdistributor/dependencies_distributor_test.go b/pkg/dependenciesdistributor/dependencies_distributor_test.go
index 4637901009d3..ed61e9dc979f 100644
--- a/pkg/dependenciesdistributor/dependencies_distributor_test.go
+++ b/pkg/dependenciesdistributor/dependencies_distributor_test.go
@@ -18,8 +18,10 @@ package dependenciesdistributor
 
 import (
 	"context"
+	"fmt"
 	"reflect"
 	"testing"
+	"time"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -27,20 +29,218 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/client-go/dynamic"
 	dynamicfake "k8s.io/client-go/dynamic/fake"
 	"k8s.io/client-go/kubernetes/scheme"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+	"sigs.k8s.io/controller-runtime/pkg/event"
 
 	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
+	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
 	workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 	"github.com/karmada-io/karmada/pkg/util"
 	"github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
 	"github.com/karmada-io/karmada/pkg/util/fedinformer/keys"
 )
 
+type MockAsyncWorker struct {
+	queue []interface{}
+}
+
+// Note: This is a dummy implementation of Add for testing purposes.
+func (m *MockAsyncWorker) Add(item interface{}) {
+	// No actual work is done in the mock; we just simulate running
+	m.queue = append(m.queue, item)
+}
+
+// Note: This is a dummy implementation of AddAfter for testing purposes.
+func (m *MockAsyncWorker) AddAfter(item interface{}, duration time.Duration) {
+	// No actual work is done in the mock; we just simulate running
+	fmt.Printf("%v", duration)
+	m.queue = append(m.queue, item)
+}
+
+// Note: This is a dummy implementation of Enqueue for testing purposes.
+func (m *MockAsyncWorker) Enqueue(obj interface{}) {
+	// Assuming KeyFunc is used to generate a key; for simplicity, we use obj directly
+	m.queue = append(m.queue, obj)
+}
+
+// Note: This is a dummy implementation of Run for testing purposes.
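Stepping back to the createOrUpdateAttachedBinding hunk above, the update path can be read as a single merge rule: a binding with no spec.Placement was generated by the dependency mechanism and may inherit the independent binding's ConflictResolution, while a policy-managed binding keeps its own; deletion behavior always follows the independent binding. A condensed sketch of that rule, using names from the surrounding file (illustration only, not part of the patch):

func mergeAttachedBinding(existing, attached *workv1alpha2.ResourceBinding) {
	// No Placement => created by the dependency mechanism, so the
	// independent binding's conflict-resolution choice carries over.
	if existing.Spec.Placement == nil {
		existing.Spec.ConflictResolution = attached.Spec.ConflictResolution
	}
	existing.Spec.RequiredBy = mergeBindingSnapshot(existing.Spec.RequiredBy, attached.Spec.RequiredBy)
	existing.Labels = util.DedupeAndMergeLabels(existing.Labels, attached.Labels)
	existing.Spec.Resource = attached.Spec.Resource
	// PreserveResourcesOnDeletion is always propagated from the independent binding.
	existing.Spec.PreserveResourcesOnDeletion = attached.Spec.PreserveResourcesOnDeletion
}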
+func (m *MockAsyncWorker) Run(workerNumber int, stopChan <-chan struct{}) { + // No actual work is done in the mock; we just simulate running + fmt.Printf("%v", workerNumber) + fmt.Printf("%v", <-stopChan) +} + +// GetQueue returns the current state of the queue +func (m *MockAsyncWorker) GetQueue() []interface{} { + return m.queue +} + +func Test_OnUpdate(t *testing.T) { + type args struct { + oldObj interface{} + newObj interface{} + } + tests := []struct { + name string + args args + wantQueueSize int + }{ + { + name: "update the object, specification changed", + args: args{ + oldObj: &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + }, + newObj: &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + }, + }, + wantQueueSize: 2, + }, + { + name: "do not update the object, no specification changed", + args: args{ + oldObj: &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + }, + newObj: &corev1.Node{ + TypeMeta: metav1.TypeMeta{ + Kind: "Node", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bar", + }, + }, + }, + wantQueueSize: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &MockAsyncWorker{} + d := &DependenciesDistributor{ + resourceProcessor: mockWorker, + } + + d.OnUpdate(tt.args.oldObj, tt.args.newObj) + + gotQueueSize := len(mockWorker.GetQueue()) + if gotQueueSize != tt.wantQueueSize { + t.Errorf("OnUpdate() want queue size %v, got %v", tt.wantQueueSize, gotQueueSize) + } + }) + } +} + +func Test_reconcileResourceTemplate(t *testing.T) { + type args struct { + key util.QueueKey + } + type fields struct { + Client client.Client + } + tests := []struct { + name string + args args + fields fields + wantGenericEventLength int + wantErr bool + }{ + { + name: "reconcile resource template", + args: args{ + key: &LabelsKey{ + ClusterWideKey: keys.ClusterWideKey{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "demo-app", + Namespace: "test", + }, + Labels: map[string]string{ + "app": "test", + }, + }, + }, + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "app": "test", + }, + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"apps/v1\",\"kind\":\"Deployment\",\"namespace\":\"test\",\"name\":\"demo-app\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + wantGenericEventLength: 1, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + genericEvent: make(chan event.TypedGenericEvent[*workv1alpha2.ResourceBinding], 1), + } + + err := d.reconcileResourceTemplate(tt.args.key) + if (err != nil) != tt.wantErr { + t.Errorf("reconcileResourceTemplate() error = %v, 
wantErr %v", err, tt.wantErr) + } + + gotGenericEventLength := len(d.genericEvent) + if gotGenericEventLength != tt.wantGenericEventLength { + t.Errorf("reconcileResourceTemplate() length of genericEvent = %v, want length %v", gotGenericEventLength, tt.wantGenericEventLength) + } + }) + } +} + func Test_dependentObjectReferenceMatches(t *testing.T) { type args struct { objectKey *LabelsKey @@ -91,36 +291,1328 @@ func Test_dependentObjectReferenceMatches(t *testing.T) { }}, }, }, - want: true, + want: true, + }, + { + name: "test labels", + args: args{ + objectKey: &LabelsKey{ + ClusterWideKey: keys.ClusterWideKey{ + Group: "", + Version: "v1", + Kind: "ConfigMap", + Namespace: "test", + }, + Labels: map[string]string{ + "app": "test", + }, + }, + referenceBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"namespace\":\"test\",\"labelSelector\":{\"matchExpressions\":[{\"key\":\"app\",\"operator\":\"In\",\"values\":[\"test\"]}]}}]", + }}, + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := matchesWithBindingDependencies(tt.args.objectKey, tt.args.referenceBinding) + if got != tt.want { + t.Errorf("matchesWithBindingDependencies() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_addFinalizer(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + } + tests := []struct { + name string + fields fields + args args + want *workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "add finalizer", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + }, + }, + want: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + Finalizers: []string{util.BindingDependenciesDistributorFinalizer}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + } + err := d.addFinalizer(context.Background(), tt.args.independentBinding) + if (err != nil) != tt.wantErr { + t.Errorf("addFinalizer() error = %v, wantErr %v", err, tt.wantErr) + } + + bindingKey := 
client.ObjectKey{Namespace: tt.args.independentBinding.Namespace, Name: tt.args.independentBinding.Name} + got := &workv1alpha2.ResourceBinding{} + err = d.Client.Get(context.Background(), bindingKey, got) + if (err != nil) != tt.wantErr { + t.Errorf("Client.Get() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.Get() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_removeFinalizer(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + } + tests := []struct { + name string + fields fields + args args + want *workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "remove non-empty finalizer", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Finalizers: []string{util.BindingDependenciesDistributorFinalizer}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Finalizers: []string{util.BindingDependenciesDistributorFinalizer}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + }, + }, + want: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + } + + err := d.removeFinalizer(context.Background(), tt.args.independentBinding) + if (err != nil) != tt.wantErr { + t.Errorf("removeFinalizer() error = %v, wantErr %v", err, tt.wantErr) + } + + bindingKey := client.ObjectKey{Namespace: tt.args.independentBinding.Namespace, Name: tt.args.independentBinding.Name} + got := &workv1alpha2.ResourceBinding{} + err = d.Client.Get(context.Background(), bindingKey, got) + if (err != nil) != tt.wantErr { + t.Errorf("Client.Get() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.Get() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_handleIndependentBindingDeletion(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + id string + namespace string + name string + } + tests := []struct { + name string + fields fields + args args + wantBindings *workv1alpha2.ResourceBindingList + wantErr bool + }{ + { + name: "handle independent binding deletion", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + 
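As an aside on the fixtures used throughout these cases: the "resourcebinding.karmada.io/depended-by-<suffix>" label ties an attached binding back to its independent binding. A hedged sketch of how such a key/value pair is derived, assuming a names.GenerateBindingReferenceKey helper in pkg/util/names that hashes the independent binding's namespace and name (illustration only):

// Hypothetical illustration: the key suffix is a short hash of the independent
// binding's namespace/name, and the value is its permanent ID label.
key := dependedByLabelKeyPrefix + names.GenerateBindingReferenceKey("test", "test-binding")
attachedBinding.Labels[key] = independentBinding.Labels[workv1alpha2.ResourceBindingPermanentIDLabel]
// e.g. "resourcebinding.karmada.io/depended-by-5dbb6dc9c8" -> "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"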
utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "app": "nginx", + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + RequiredBy: []workv1alpha2.BindingSnapshot{ + { + Namespace: "test", + Name: "test-binding", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "foo", + Replicas: 1, + }, + }, + }, + { + Namespace: "default-1", + Name: "default-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + { + Namespace: "test", + Name: "test-binding", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "bar", + Replicas: 1, + }, + }, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + id: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + namespace: "test", + name: "test-binding", + }, + wantBindings: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + Labels: map[string]string{ + "app": "nginx", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + RequiredBy: []workv1alpha2.BindingSnapshot{ + { + Namespace: "default-1", + Name: "default-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + } + err := d.handleIndependentBindingDeletion(tt.args.id, tt.args.namespace, tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("handleIndependentBindingDeletion() error = %v, wantErr %v", err, tt.wantErr) + } + + existBindings := &workv1alpha2.ResourceBindingList{} + err = d.Client.List(context.TODO(), existBindings) + if (err != nil) != tt.wantErr { + t.Errorf("handleIndependentBindingDeletion(), Client.List() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(existBindings, tt.wantBindings) { + t.Errorf("handleIndependentBindingDeletion(), Client.List() = %v, want %v", existBindings, tt.wantBindings) + } + }) + } +} + +func Test_removeOrphanAttachedBindings(t *testing.T) { + type fields struct { + Client client.Client + DynamicClient dynamic.Interface + InformerManager genericmanager.SingleClusterInformerManager + RESTMapper meta.RESTMapper + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + dependencies []configv1alpha1.DependentObjectReference + } + tests := []struct { + name string + fields fields + args args + wantBindings *workv1alpha2.ResourceBindingList + wantErr bool + }{ + { + name: "remove orphan attached bindings", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := 
&workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "test", + Name: "pod-test", + }, + }, + }, + wantBindings: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1001", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + DynamicClient: tt.fields.DynamicClient, + InformerManager: tt.fields.InformerManager, + RESTMapper: tt.fields.RESTMapper, + } + err := d.removeOrphanAttachedBindings(context.Background(), tt.args.independentBinding, tt.args.dependencies) + if (err != nil) != tt.wantErr { + t.Errorf("removeOrphanAttachedBindings() error = %v, wantErr %v", err, tt.wantErr) + } + + existBindings := &workv1alpha2.ResourceBindingList{} + err = d.Client.List(context.TODO(), existBindings) + if (err != nil) != tt.wantErr { + t.Errorf("removeOrphanAttachedBindings(), Client.List() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(existBindings, tt.wantBindings) { + t.Errorf("removeOrphanAttachedBindings(), Client.List() = %v, want %v", existBindings, tt.wantBindings) + } + }) + } +} + +func Test_handleDependentResource(t *testing.T) { + type fields struct { + Client client.Client + DynamicClient dynamic.Interface + InformerManager 
genericmanager.SingleClusterInformerManager + RESTMapper meta.RESTMapper + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + dependencies configv1alpha1.DependentObjectReference + } + tests := []struct { + name string + fields fields + args args + wantBinding *workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "nil label selector, non-empty name", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + Labels: map[string]string{workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + dependencies: configv1alpha1.DependentObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + wantBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "empty name, non-nil label selector", + fields: 
fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + Labels: map[string]string{workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + dependencies: configv1alpha1.DependentObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}, + }, + }, + wantBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + UID: types.UID("db56a4a6-0dff-465a-b046-2c1dea42a42b"), + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "nil label selector, empty name", + fields: fields{ + Client: func() client.Client { + return fake.NewClientBuilder().Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := 
dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + Labels: map[string]string{workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + }, + dependencies: configv1alpha1.DependentObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantBinding: &workv1alpha2.ResourceBinding{}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + DynamicClient: tt.fields.DynamicClient, + InformerManager: tt.fields.InformerManager, + RESTMapper: tt.fields.RESTMapper, + } + err := d.handleDependentResource(context.Background(), tt.args.independentBinding, tt.args.dependencies) + if (err != nil) != tt.wantErr { + t.Errorf("handleDependentResource() error = %v, wantErr %v", err, tt.wantErr) + } + + existBinding := &workv1alpha2.ResourceBinding{} + bindingKey := client.ObjectKeyFromObject(tt.args.independentBinding) + err = d.Client.Get(context.TODO(), bindingKey, existBinding) + if (err != nil) != tt.wantErr { + t.Errorf("handleDependentResource(), Client.Get() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(existBinding, tt.wantBinding) { + t.Errorf("handleDependentResource(), Client.Get() = %v, want %v", existBinding, tt.wantBinding) + } + }) + } +} + +func Test_recordDependencies(t *testing.T) { + type fields struct { + Client client.Client + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + dependencies []configv1alpha1.DependentObjectReference + } + tests := []struct { + name string + fields fields + args args + want *workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "record updated dependencies", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + 
Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + want: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + wantErr: false, + }, + { + name: "no need to record non-updated dependencies", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + want: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + wantErr: false, + }, + { + name: "non-matching independent binding", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + return 
fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + want: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + Annotations: map[string]string{ + dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"Pod\",\"namespace\":\"default\",\"name\":\"pod\"}]", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &DependenciesDistributor{ + Client: tt.fields.Client, + } + err := d.recordDependencies(context.Background(), tt.args.independentBinding, tt.args.dependencies) + if (err != nil) != tt.wantErr { + t.Errorf("recordDependencies() error = %v, wantErr %v", err, tt.wantErr) + } + + bindingKey := client.ObjectKey{Namespace: tt.args.independentBinding.Namespace, Name: tt.args.independentBinding.Name} + got := &workv1alpha2.ResourceBinding{} + err = d.Client.Get(context.Background(), bindingKey, got) + if (err != nil) != tt.wantErr { + t.Errorf("Client.Get() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.Get() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_findOrphanAttachedBindings(t *testing.T) { + type fields struct { + Client client.Client + DynamicClient dynamic.Interface + InformerManager genericmanager.SingleClusterInformerManager + RESTMapper meta.RESTMapper + } + type args struct { + independentBinding *workv1alpha2.ResourceBinding + dependencies []configv1alpha1.DependentObjectReference + } + tests := []struct { + name string + fields fields + args args + want []*workv1alpha2.ResourceBinding + wantErr bool + }{ + { + name: "find orphan attached bindings - matching dependency", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: 
map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, + args: args{ + independentBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod-test", + }, + }, + }, + want: []*workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + }, + wantErr: false, }, { - name: "test labels", + name: "find orphan attached bindings - non matching dependency", + fields: fields{ + Client: func() client.Client { + Scheme := runtime.NewScheme() + utilruntime.Must(scheme.AddToScheme(Scheme)) + utilruntime.Must(workv1alpha2.Install(Scheme)) + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + } + + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }(), + DynamicClient: dynamicfake.NewSimpleDynamicClient(scheme.Scheme), + InformerManager: func() genericmanager.SingleClusterInformerManager { + c := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod", Namespace: "default", Labels: map[string]string{"resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9"}}}) + m := genericmanager.NewSingleClusterInformerManager(c, 0, context.TODO().Done()) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + return m + }(), + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + }, args: args{ - objectKey: &LabelsKey{ - ClusterWideKey: keys.ClusterWideKey{ - Group: "", - Version: "v1", - Kind: "ConfigMap", - Namespace: "test", + independentBinding: &workv1alpha2.ResourceBinding{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, }, - Labels: map[string]string{ - "app": "test", + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, }, }, - referenceBinding: &workv1alpha2.ResourceBinding{ - ObjectMeta: metav1.ObjectMeta{Annotations: map[string]string{ - dependenciesAnnotationKey: "[{\"apiVersion\":\"v1\",\"kind\":\"ConfigMap\",\"namespace\":\"test\",\"labelSelector\":{\"matchExpressions\":[{\"key\":\"app\",\"operator\":\"In\",\"values\":[\"test\"]}]}}]", - }}, + dependencies: []configv1alpha1.DependentObjectReference{ + { + APIVersion: "v1", + Kind: "Pod", + Namespace: "test", + Name: "pod-test", + }, }, }, - want: true, + want: []*workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{ + "resourcebinding.karmada.io/depended-by-5dbb6dc9c8": "93162d3c-ee8e-4995-9034-05f4d5d2c2b9", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + ResourceVersion: "22222", + }, + }, + }, + }, + wantErr: false, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := matchesWithBindingDependencies(tt.args.objectKey, tt.args.referenceBinding) - if got != tt.want { - t.Errorf("matchesWithBindingDependencies() got = %v, want %v", got, tt.want) + d := &DependenciesDistributor{ + Client: tt.fields.Client, + DynamicClient: tt.fields.DynamicClient, + InformerManager: tt.fields.InformerManager, + RESTMapper: tt.fields.RESTMapper, + } + got, err := d.findOrphanAttachedBindings(context.Background(), tt.args.independentBinding, tt.args.dependencies) + if (err != nil) != tt.wantErr { + t.Errorf("findOrphanAttachedBindings() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("findOrphanAttachedBindings() got = %v, want %v", got, tt.want) } }) } @@ -995,6 +2487,175 @@ func Test_createOrUpdateAttachedBinding(t *testing.T) { return fake.NewClientBuilder().WithScheme(Scheme).Build() }, }, + { + name: "update attached binding with ConflictResolution", + attachedBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{"app": "nginx"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + RequiredBy: []workv1alpha2.BindingSnapshot{ + { + Namespace: "test-1", + Name: "test-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "foo", + Replicas: 1, + }, + }, + }, + { + Namespace: "default-2", + Name: "default-binding-2", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member2", + Replicas: 4, + }, + }, + }, + { + Namespace: "test-2", + Name: "test-binding-2", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "bar", + Replicas: 1, + }, + }, + }, + }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + }, + }, + wantErr: false, + wantBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + 
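// The expectation below reflects merge-style update semantics: labels from the
// stored binding ("foo": "bar") are kept alongside the desired ones ("app": "nginx"),
// RequiredBy snapshots are merged with entries for the same namespace/name
// replaced (default-2 goes from 3 to 4 replicas) and new entries appended,
// and ConflictResolution is carried over from the desired spec.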
Name: "test-binding", + Namespace: "test", + ResourceVersion: "1001", + Labels: map[string]string{"app": "nginx", "foo": "bar"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "fake-ns", + Name: "demo-app", + ResourceVersion: "22222", + }, + RequiredBy: []workv1alpha2.BindingSnapshot{ + { + Namespace: "default-1", + Name: "default-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + { + Namespace: "default-2", + Name: "default-binding-2", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member2", + Replicas: 4, + }, + }, + }, + { + Namespace: "default-3", + Name: "default-binding-3", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member3", + Replicas: 4, + }, + }, + }, + { + Namespace: "test-1", + Name: "test-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "foo", + Replicas: 1, + }, + }, + }, + { + Namespace: "test-2", + Name: "test-binding-2", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "bar", + Replicas: 1, + }, + }, + }, + }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + }, + }, + setupClient: func() client.Client { + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test", + ResourceVersion: "1000", + Labels: map[string]string{"foo": "bar"}, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + RequiredBy: []workv1alpha2.BindingSnapshot{ + { + Namespace: "default-1", + Name: "default-binding-1", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 2, + }, + }, + }, + { + Namespace: "default-2", + Name: "default-binding-2", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member2", + Replicas: 3, + }, + }, + }, + { + Namespace: "default-3", + Name: "default-binding-3", + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member3", + Replicas: 4, + }, + }, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(Scheme).WithObjects(rb).Build() + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/descheduler/core/filter_test.go b/pkg/descheduler/core/filter_test.go index 4f9c592168e2..a1e0b1ae3d35 100644 --- a/pkg/descheduler/core/filter_test.go +++ b/pkg/descheduler/core/filter_test.go @@ -58,6 +58,21 @@ func TestFilterBindings(t *testing.T) { }, expected: 2, }, + { + name: "Invalid placement annotation", + bindings: []*workv1alpha2.ResourceBinding{ + createBindingWithInvalidPlacementAnnotation("binding1", "apps/v1", "Deployment"), + }, + expected: 0, + }, + { + name: "Mix of valid and invalid annotations", + bindings: []*workv1alpha2.ResourceBinding{ + createBindingWithInvalidPlacementAnnotation("binding1", "apps/v1", "Deployment"), + createBinding("binding2", "apps/v1", "Deployment", createValidPlacement()), + }, + expected: 1, + }, } for _, tt := range tests { @@ -108,6 +123,30 @@ func TestValidateGVK(t *testing.T) { }, expected: false, }, + { + name: "Empty APIVersion", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "", + Kind: "Deployment", + }, + expected: false, + }, + { + name: "Empty Kind", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "", + }, + expected: false, + }, + { + name: "Case-sensitive check", + reference: &workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "deployment", + }, + expected: false, + }, } for _, tt := range tests { @@ -151,6 +190,30 @@ func TestValidatePlacement(t 
*testing.T) { binding: createBinding("binding5", "apps/v1", "Deployment", createPlacement(policyv1alpha1.ReplicaSchedulingTypeDivided, policyv1alpha1.ReplicaDivisionPreferenceWeighted, &policyv1alpha1.ClusterPreferences{DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas})), expected: true, }, + { + name: "Invalid JSON in placement annotation", + binding: createBindingWithInvalidPlacementAnnotation("binding6", "apps/v1", "Deployment"), + expected: false, + }, + { + name: "Valid JSON but invalid placement structure", + binding: func() *workv1alpha2.ResourceBinding { + b := createBinding("binding7", "apps/v1", "Deployment", nil) + b.Annotations = map[string]string{util.PolicyPlacementAnnotation: `{"invalidField": "value"}`} + return b + }(), + expected: false, + }, + { + name: "Nil ReplicaScheduling", + binding: func() *workv1alpha2.ResourceBinding { + p := &policyv1alpha1.Placement{ + ReplicaScheduling: nil, + } + return createBinding("binding8", "apps/v1", "Deployment", p) + }(), + expected: false, + }, } for _, tt := range tests { @@ -163,6 +226,12 @@ func TestValidatePlacement(t *testing.T) { } } +func createBindingWithInvalidPlacementAnnotation(name, apiVersion, kind string) *workv1alpha2.ResourceBinding { + binding := createBinding(name, apiVersion, kind, nil) + binding.Annotations = map[string]string{util.PolicyPlacementAnnotation: "invalid json"} + return binding +} + func createBinding(name, apiVersion, kind string, placement *policyv1alpha1.Placement) *workv1alpha2.ResourceBinding { binding := &workv1alpha2.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/descheduler/core/helper_test.go b/pkg/descheduler/core/helper_test.go index a0f7fda5ee1e..154006bdaa84 100644 --- a/pkg/descheduler/core/helper_test.go +++ b/pkg/descheduler/core/helper_test.go @@ -18,6 +18,7 @@ package core import ( "context" + "fmt" "reflect" "testing" "time" @@ -30,143 +31,336 @@ import ( ) func TestNewSchedulingResultHelper(t *testing.T) { - binding := &workv1alpha2.ResourceBinding{ - Spec: workv1alpha2.ResourceBindingSpec{ - Clusters: []workv1alpha2.TargetCluster{ - {Name: "cluster1", Replicas: 3}, - {Name: "cluster2", Replicas: 2}, + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expected *SchedulingResultHelper + }{ + { + name: "Valid binding with ready replicas", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 3}, + {Name: "cluster2", Replicas: 2}, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 1}`)}, + }, + }, + }, + }, + expected: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, }, }, - Status: workv1alpha2.ResourceBindingStatus{ - AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ - { - ClusterName: "cluster1", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + { + name: "Binding with invalid status", + binding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 3}, + {Name: "cluster2", Replicas: 2}, + }, }, - { - ClusterName: "cluster2", - 
Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 1}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, + }, + }, + expected: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: client.UnauthenticReplica}, }, }, }, } - helper := NewSchedulingResultHelper(binding) - - expected := &SchedulingResultHelper{ - ResourceBinding: binding, - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 1}, - }, - } - - if !reflect.DeepEqual(helper, expected) { - t.Errorf("NewSchedulingResultHelper() = %v, want %v", helper, expected) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + helper := NewSchedulingResultHelper(tt.binding) + helper.ResourceBinding = nil + if !reflect.DeepEqual(helper, tt.expected) { + t.Errorf("NewSchedulingResultHelper() = %v, want %v", helper, tt.expected) + } + }) } } -func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) { - helper := &SchedulingResultHelper{ - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 2}, - {ClusterName: "cluster3", Spec: 4, Ready: 1}, +func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) { + tests := []struct { + name string + helper *SchedulingResultHelper + mockEstimator *mockUnschedulableReplicaEstimator + expected []*TargetClusterWrapper + expectedErrLog string + }{ + { + name: "Successful fill", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, + {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 1}, + }, + }, + { + name: "Estimator error", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{shouldError: true}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 0}, + }, + expectedErrLog: "Max cluster unschedulable replicas error: mock error", + }, + { + name: "UnauthenticReplica handling", + helper: &SchedulingResultHelper{ + ResourceBinding: &workv1alpha2.ResourceBinding{ + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + }, + }, + TargetClusters: 
[]*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 1}, + }, + }, + mockEstimator: &mockUnschedulableReplicaEstimator{unauthenticCluster: "cluster2"}, + expected: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, + {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 0}, + }, }, } - clusters, names := helper.GetUndesiredClusters() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + client.GetUnschedulableReplicaEstimators()["mock"] = tt.mockEstimator + defer delete(client.GetUnschedulableReplicaEstimators(), "mock") - expectedClusters := []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster3", Spec: 4, Ready: 1}, - } - expectedNames := []string{"cluster1", "cluster3"} + tt.helper.FillUnschedulableReplicas(time.Minute) - if !reflect.DeepEqual(clusters, expectedClusters) { - t.Errorf("GetUndesiredClusters() clusters = %v, want %v", clusters, expectedClusters) - } - if !reflect.DeepEqual(names, expectedNames) { - t.Errorf("GetUndesiredClusters() names = %v, want %v", names, expectedNames) + if !reflect.DeepEqual(tt.helper.TargetClusters, tt.expected) { + t.Errorf("FillUnschedulableReplicas() = %v, want %v", tt.helper.TargetClusters, tt.expected) + } + }) } } -func TestSchedulingResultHelper_FillUnschedulableReplicas(t *testing.T) { - mockEstimator := &mockUnschedulableReplicaEstimator{} - client.GetUnschedulableReplicaEstimators()["mock"] = mockEstimator - defer delete(client.GetUnschedulableReplicaEstimators(), "mock") - - helper := &SchedulingResultHelper{ - ResourceBinding: &workv1alpha2.ResourceBinding{ - Spec: workv1alpha2.ResourceBindingSpec{ - Resource: workv1alpha2.ObjectReference{ - APIVersion: "apps/v1", - Kind: "Deployment", - Name: "test-deployment", - Namespace: "default", +func TestSchedulingResultHelper_GetUndesiredClusters(t *testing.T) { + tests := []struct { + name string + helper *SchedulingResultHelper + expectedClusters []*TargetClusterWrapper + expectedNames []string + }{ + { + name: "Mixed desired and undesired clusters", + helper: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster2", Spec: 2, Ready: 2}, + {ClusterName: "cluster3", Spec: 4, Ready: 1}, }, }, + expectedClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 3, Ready: 2}, + {ClusterName: "cluster3", Spec: 4, Ready: 1}, + }, + expectedNames: []string{"cluster1", "cluster3"}, }, - TargetClusters: []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2}, - {ClusterName: "cluster2", Spec: 2, Ready: 1}, + { + name: "All clusters desired", + helper: &SchedulingResultHelper{ + TargetClusters: []*TargetClusterWrapper{ + {ClusterName: "cluster1", Spec: 2, Ready: 2}, + {ClusterName: "cluster2", Spec: 3, Ready: 3}, + }, + }, + expectedClusters: nil, + expectedNames: nil, }, } - helper.FillUnschedulableReplicas(time.Minute) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clusters, names := tt.helper.GetUndesiredClusters() - expected := []*TargetClusterWrapper{ - {ClusterName: "cluster1", Spec: 3, Ready: 2, Unschedulable: 1}, - {ClusterName: "cluster2", Spec: 2, Ready: 1, Unschedulable: 1}, - } - - if !reflect.DeepEqual(helper.TargetClusters, expected) { - t.Errorf("FillUnschedulableReplicas() = %v, want %v", helper.TargetClusters, expected) + if !reflect.DeepEqual(clusters, tt.expectedClusters) { + 
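// GetUndesiredClusters is expected to return only clusters whose observed
// ready replicas fall short of the scheduled amount (Ready < Spec), in the
// original TargetClusters order, so the DeepEqual comparisons here are
// order-sensitive.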
t.Errorf("GetUndesiredClusters() clusters = %v, want %v", clusters, tt.expectedClusters) + } + if !reflect.DeepEqual(names, tt.expectedNames) { + t.Errorf("GetUndesiredClusters() names = %v, want %v", names, tt.expectedNames) + } + }) } } func TestGetReadyReplicas(t *testing.T) { - binding := &workv1alpha2.ResourceBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-binding", - Namespace: "default", + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expected map[string]int32 + }{ + { + name: "Valid status with readyReplicas", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 3}`)}, + }, + }, + }, + }, + expected: map[string]int32{ + "cluster1": 2, + "cluster2": 3, + }, }, - Status: workv1alpha2.ResourceBindingStatus{ - AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ - { - ClusterName: "cluster1", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + { + name: "Status with missing readyReplicas field", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", }, - { - ClusterName: "cluster2", - Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 3}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`{"someOtherField": 3}`)}, + }, + }, + }, + }, + expected: map[string]int32{ + "cluster1": 2, + }, + }, + { + name: "Status with invalid JSON", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", }, - { - ClusterName: "cluster3", - Status: &runtime.RawExtension{Raw: []byte(`{"someOtherField": 1}`)}, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "cluster1", + Status: &runtime.RawExtension{Raw: []byte(`{"readyReplicas": 2}`)}, + }, + { + ClusterName: "cluster2", + Status: &runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, }, }, + expected: map[string]int32{ + "cluster1": 2, + }, }, } - result := getReadyReplicas(binding) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getReadyReplicas(tt.binding) - expected := map[string]int32{ - "cluster1": 2, - "cluster2": 3, + if !reflect.DeepEqual(result, tt.expected) { + t.Errorf("getReadyReplicas() = %v, want %v", result, tt.expected) + } + }) } +} - if !reflect.DeepEqual(result, expected) { - t.Errorf("getReadyReplicas() = %v, want %v", result, expected) - } +// Mock implementation of UnschedulableReplicaEstimator +type mockUnschedulableReplicaEstimator struct { + shouldError bool + unauthenticCluster string } -// mockUnschedulableReplicaEstimator is a mock implementation of the UnschedulableReplicaEstimator interface -type mockUnschedulableReplicaEstimator struct{} +func (m *mockUnschedulableReplicaEstimator) GetUnschedulableReplicas(_ context.Context, clusters []string, _ *workv1alpha2.ObjectReference, _ time.Duration) 
([]workv1alpha2.TargetCluster, error) { + if m.shouldError { + return nil, fmt.Errorf("mock error") + } -func (m *mockUnschedulableReplicaEstimator) GetUnschedulableReplicas(_ context.Context, _ []string, _ *workv1alpha2.ObjectReference, _ time.Duration) ([]workv1alpha2.TargetCluster, error) { - return []workv1alpha2.TargetCluster{ - {Name: "cluster1", Replicas: 1}, - {Name: "cluster2", Replicas: 1}, - }, nil + result := make([]workv1alpha2.TargetCluster, len(clusters)) + for i, cluster := range clusters { + replicas := int32(1) + if cluster == m.unauthenticCluster { + replicas = client.UnauthenticReplica + } + result[i] = workv1alpha2.TargetCluster{Name: cluster, Replicas: replicas} + } + return result, nil } diff --git a/pkg/descheduler/descheduler.go b/pkg/descheduler/descheduler.go index f37d79db5758..41fbafcf8678 100644 --- a/pkg/descheduler/descheduler.go +++ b/pkg/descheduler/descheduler.go @@ -64,10 +64,11 @@ type Descheduler struct { eventRecorder record.EventRecorder - schedulerEstimatorCache *estimatorclient.SchedulerEstimatorCache - schedulerEstimatorServicePrefix string - schedulerEstimatorClientConfig *grpcconnection.ClientConfig - schedulerEstimatorWorker util.AsyncWorker + schedulerEstimatorCache *estimatorclient.SchedulerEstimatorCache + schedulerEstimatorServiceNamespace string + schedulerEstimatorServicePrefix string + schedulerEstimatorClientConfig *grpcconnection.ClientConfig + schedulerEstimatorWorker util.AsyncWorker unschedulableThreshold time.Duration deschedulingInterval time.Duration @@ -93,9 +94,10 @@ func NewDescheduler(karmadaClient karmadaclientset.Interface, kubeClient kuberne KeyFile: opts.SchedulerEstimatorKeyFile, TargetPort: opts.SchedulerEstimatorPort, }, - schedulerEstimatorServicePrefix: opts.SchedulerEstimatorServicePrefix, - unschedulableThreshold: opts.UnschedulableThreshold.Duration, - deschedulingInterval: opts.DeschedulingInterval.Duration, + schedulerEstimatorServiceNamespace: opts.SchedulerEstimatorServiceNamespace, + schedulerEstimatorServicePrefix: opts.SchedulerEstimatorServicePrefix, + unschedulableThreshold: opts.UnschedulableThreshold.Duration, + deschedulingInterval: opts.DeschedulingInterval.Duration, } // ignore the error here because the informers haven't been started _ = desched.bindingInformer.SetTransform(fedinformer.StripUnusedFields) @@ -186,6 +188,10 @@ func (d *Descheduler) worker(key util.QueueKey) error { } return fmt.Errorf("get ResourceBinding(%s) error: %v", namespacedName, err) } + if !binding.DeletionTimestamp.IsZero() { + klog.Infof("ResourceBinding(%s) in work queue is being deleted, ignore.", namespacedName) + return nil + } h := core.NewSchedulingResultHelper(binding) if _, undesiredClusters := h.GetUndesiredClusters(); len(undesiredClusters) == 0 { @@ -280,7 +286,12 @@ func (d *Descheduler) establishEstimatorConnections() { return } for i := range clusterList.Items { - if err = estimatorclient.EstablishConnection(d.KubeClient, clusterList.Items[i].Name, d.schedulerEstimatorCache, d.schedulerEstimatorServicePrefix, d.schedulerEstimatorClientConfig); err != nil { + serviceInfo := estimatorclient.SchedulerEstimatorServiceInfo{ + Name: clusterList.Items[i].Name, + Namespace: d.schedulerEstimatorServiceNamespace, + NamePrefix: d.schedulerEstimatorServicePrefix, + } + if err = estimatorclient.EstablishConnection(d.KubeClient, serviceInfo, d.schedulerEstimatorCache, d.schedulerEstimatorClientConfig); err != nil { klog.Error(err) } } @@ -300,7 +311,12 @@ func (d *Descheduler) reconcileEstimatorConnection(key util.QueueKey) 
error { } return err } - return estimatorclient.EstablishConnection(d.KubeClient, name, d.schedulerEstimatorCache, d.schedulerEstimatorServicePrefix, d.schedulerEstimatorClientConfig) + serviceInfo := estimatorclient.SchedulerEstimatorServiceInfo{ + Name: name, + Namespace: d.schedulerEstimatorServiceNamespace, + NamePrefix: d.schedulerEstimatorServicePrefix, + } + return estimatorclient.EstablishConnection(d.KubeClient, serviceInfo, d.schedulerEstimatorCache, d.schedulerEstimatorClientConfig) } func (d *Descheduler) recordDescheduleResultEventForResourceBinding(rb *workv1alpha2.ResourceBinding, message string, err error) { diff --git a/pkg/descheduler/descheduler_test.go b/pkg/descheduler/descheduler_test.go index 9f77fda6279d..7ecbe8dfa461 100644 --- a/pkg/descheduler/descheduler_test.go +++ b/pkg/descheduler/descheduler_test.go @@ -18,6 +18,8 @@ package descheduler import ( "context" + "errors" + "fmt" "reflect" "testing" "time" @@ -25,8 +27,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "google.golang.org/grpc" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" @@ -39,10 +45,195 @@ import ( estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service" fakekarmadaclient "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake" informerfactory "github.com/karmada-io/karmada/pkg/generated/informers/externalversions" + worklister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/helper" ) +func TestRecordDescheduleResultEventForResourceBinding(t *testing.T) { + tests := []struct { + name string + rb *workv1alpha2.ResourceBinding + message string + err error + expectedEvents []string + }{ + { + name: "Nil ResourceBinding", + rb: nil, + message: "Test message", + err: nil, + expectedEvents: []string{}, + }, + { + name: "Successful descheduling", + rb: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test-namespace", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + UID: types.UID("test-uid"), + }, + }, + }, + message: "Descheduling succeeded", + err: nil, + expectedEvents: []string{ + "Normal DescheduleBindingSucceed Descheduling succeeded", + "Normal DescheduleBindingSucceed Descheduling succeeded", + }, + }, + { + name: "Failed descheduling", + rb: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "test-namespace", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "test-namespace", + UID: types.UID("test-uid"), + }, + }, + }, + message: "Descheduling failed", + err: errors.New("descheduling error"), + expectedEvents: []string{ + "Warning DescheduleBindingFailed descheduling error", + "Warning DescheduleBindingFailed descheduling error", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeRecorder := record.NewFakeRecorder(10) + d := &Descheduler{ + 
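// record.NewFakeRecorder buffers emitted events on its Events channel with
// the given capacity; only the recorder needs to be wired up here since the
// method under test touches no other Descheduler fields. The channel is
// closed and drained below so every recorded event gets asserted.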
eventRecorder: fakeRecorder, + } + + d.recordDescheduleResultEventForResourceBinding(tt.rb, tt.message, tt.err) + + close(fakeRecorder.Events) + actualEvents := []string{} + for event := range fakeRecorder.Events { + actualEvents = append(actualEvents, event) + } + + assert.Equal(t, tt.expectedEvents, actualEvents, "Recorded events do not match expected events") + }) + } +} + +func TestUpdateCluster(t *testing.T) { + tests := []struct { + name string + newObj interface{} + expectedAdd bool + }{ + { + name: "Valid cluster update", + newObj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedAdd: true, + }, + { + name: "Invalid object type", + newObj: &corev1.Pod{}, + expectedAdd: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + d := &Descheduler{ + schedulerEstimatorWorker: mockWorker, + } + + if tt.expectedAdd { + mockWorker.On("Add", mock.AnythingOfType("string")).Return() + } + + d.updateCluster(nil, tt.newObj) + + if tt.expectedAdd { + mockWorker.AssertCalled(t, "Add", "test-cluster") + } else { + mockWorker.AssertNotCalled(t, "Add", mock.Anything) + } + }) + } +} + +func TestDeleteCluster(t *testing.T) { + tests := []struct { + name string + obj interface{} + expectedAdd bool + }{ + { + name: "Delete Cluster object", + obj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + expectedAdd: true, + }, + { + name: "Delete DeletedFinalStateUnknown object", + obj: cache.DeletedFinalStateUnknown{ + Obj: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + expectedAdd: true, + }, + { + name: "Invalid object type", + obj: &corev1.Pod{}, + expectedAdd: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + d := &Descheduler{ + schedulerEstimatorWorker: mockWorker, + } + + if tt.expectedAdd { + mockWorker.On("Add", mock.AnythingOfType("string")).Return() + } + + d.deleteCluster(tt.obj) + + if tt.expectedAdd { + mockWorker.AssertCalled(t, "Add", "test-cluster") + } else { + mockWorker.AssertNotCalled(t, "Add", mock.Anything) + } + }) + } +} + func buildBinding(name, ns string, target, status []workv1alpha2.TargetCluster) (*workv1alpha2.ResourceBinding, error) { bindingStatus := workv1alpha2.ResourceBindingStatus{} for _, cluster := range status { @@ -630,3 +821,132 @@ func TestDescheduler_worker(t *testing.T) { }) } } + +func TestDescheduler_workerErrors(t *testing.T) { + tests := []struct { + name string + key interface{} + setupMocks func(*Descheduler) + expectedError string + }{ + { + name: "Invalid key type", + key: 123, + setupMocks: func(_ *Descheduler) {}, + expectedError: "failed to deschedule as invalid key: 123", + }, + { + name: "Invalid resource key format", + key: "invalid/key/format", + setupMocks: func(_ *Descheduler) {}, + expectedError: "invalid resource key: invalid/key/format", + }, + { + name: "ResourceBinding not found", + key: "default/non-existent-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + getErr: apierrors.NewNotFound(schema.GroupResource{Resource: "resourcebindings"}, "non-existent-binding"), + } + }, + expectedError: "", + }, + { + name: "Error getting ResourceBinding", + key: "default/error-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + getErr: fmt.Errorf("internal error"), + } + }, + expectedError: "get 
ResourceBinding(default/error-binding) error: internal error", + }, + { + name: "ResourceBinding being deleted", + key: "default/deleted-binding", + setupMocks: func(d *Descheduler) { + d.bindingLister = &mockBindingLister{ + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "deleted-binding", + Namespace: "default", + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + }, + } + }, + expectedError: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Descheduler{} + tt.setupMocks(d) + + err := d.worker(tt.key) + + if tt.expectedError == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tt.expectedError) + } + }) + } +} + +// Mock Implementations + +type mockAsyncWorker struct { + mock.Mock +} + +func (m *mockAsyncWorker) Add(item interface{}) { + m.Called(item) +} + +func (m *mockAsyncWorker) AddAfter(item interface{}, duration time.Duration) { + m.Called(item, duration) +} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +func (m *mockAsyncWorker) Enqueue(obj interface{}) { + m.Called(obj) +} + +func (m *mockAsyncWorker) EnqueueAfter(obj interface{}, duration time.Duration) { + m.Called(obj, duration) +} + +type mockBindingLister struct { + binding *workv1alpha2.ResourceBinding + getErr error +} + +func (m *mockBindingLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return nil, nil +} + +func (m *mockBindingLister) ResourceBindings(_ string) worklister.ResourceBindingNamespaceLister { + return &mockBindingNamespaceLister{ + binding: m.binding, + getErr: m.getErr, + } +} + +type mockBindingNamespaceLister struct { + binding *workv1alpha2.ResourceBinding + getErr error +} + +func (m *mockBindingNamespaceLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return nil, nil +} + +func (m *mockBindingNamespaceLister) Get(_ string) (*workv1alpha2.ResourceBinding, error) { + if m.getErr != nil { + return nil, m.getErr + } + return m.binding, nil +} diff --git a/pkg/detector/claim.go b/pkg/detector/claim.go new file mode 100644 index 000000000000..8e9efa1ffbc7 --- /dev/null +++ b/pkg/detector/claim.go @@ -0,0 +1,91 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package detector + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" +) + +var ( + propagationPolicyClaimLabels = []string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel, + } + propagationPolicyClaimAnnotations = []string{ + policyv1alpha1.PropagationPolicyNamespaceAnnotation, + policyv1alpha1.PropagationPolicyNameAnnotation, + } + clusterPropagationPolicyClaimLabels = []string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, + } + clusterPropagationPolicyClaimAnnotations = []string{ + policyv1alpha1.ClusterPropagationPolicyAnnotation, + } +) + +// AddPPClaimMetadata adds PropagationPolicy claim metadata, such as labels and annotations +func AddPPClaimMetadata(obj metav1.Object, policyID string, policyMeta metav1.ObjectMeta) { + util.MergeLabel(obj, policyv1alpha1.PropagationPolicyPermanentIDLabel, policyID) + + objectAnnotations := obj.GetAnnotations() + if objectAnnotations == nil { + objectAnnotations = make(map[string]string) + } + objectAnnotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation] = policyMeta.GetNamespace() + objectAnnotations[policyv1alpha1.PropagationPolicyNameAnnotation] = policyMeta.GetName() + obj.SetAnnotations(objectAnnotations) +} + +// AddCPPClaimMetadata adds ClusterPropagationPolicy claim metadata, such as labels and annotations +func AddCPPClaimMetadata(obj metav1.Object, policyID string, policyMeta metav1.ObjectMeta) { + util.MergeLabel(obj, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, policyID) + + objectAnnotations := obj.GetAnnotations() + if objectAnnotations == nil { + objectAnnotations = make(map[string]string) + } + objectAnnotations[policyv1alpha1.ClusterPropagationPolicyAnnotation] = policyMeta.GetName() + obj.SetAnnotations(objectAnnotations) +} + +// CleanupPPClaimMetadata removes PropagationPolicy claim metadata, such as labels and annotations +func CleanupPPClaimMetadata(obj metav1.Object) { + util.RemoveLabels(obj, propagationPolicyClaimLabels...) + util.RemoveAnnotations(obj, propagationPolicyClaimAnnotations...) +} + +// CleanupCPPClaimMetadata removes ClusterPropagationPolicy claim metadata, such as labels and annotations +func CleanupCPPClaimMetadata(obj metav1.Object) { + util.RemoveLabels(obj, clusterPropagationPolicyClaimLabels...) + util.RemoveAnnotations(obj, clusterPropagationPolicyClaimAnnotations...) +} + +// NeedCleanupClaimMetadata determines whether the object's claim metadata needs to be cleaned up. +// We need to ensure that the claim metadata being deleted belongs to the current PropagationPolicy/ClusterPropagationPolicy, +// otherwise, there is a risk of mistakenly deleting the ones belonging to another PropagationPolicy/ClusterPropagationPolicy. +// This situation could occur during the rapid deletion and creation of PropagationPolicy(s)/ClusterPropagationPolicy(s). +// For more info, refer to https://github.com/karmada-io/karmada/issues/5307. +func NeedCleanupClaimMetadata(obj metav1.Object, targetClaimMetadata map[string]string) bool { + for k, v := range targetClaimMetadata { + if obj.GetLabels()[k] != v { + return false + } + } + return true +} diff --git a/pkg/detector/claim_test.go b/pkg/detector/claim_test.go new file mode 100644 index 000000000000..5c2f906e4e7d --- /dev/null +++ b/pkg/detector/claim_test.go @@ -0,0 +1,210 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detector + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +func TestAddPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + policyID string + policyMeta metav1.ObjectMeta + obj metav1.Object + result metav1.Object + }{ + { + name: "add policy claim metadata", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyMeta: metav1.ObjectMeta{Name: "pp-example", Namespace: "test"}, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "test", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AddPPClaimMetadata(tt.obj, tt.policyID, tt.policyMeta) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestAddCPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + policyID string + policyMeta metav1.ObjectMeta + obj metav1.Object + result metav1.Object + }{ + { + name: "add cluster policy claim metadata", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyMeta: metav1.ObjectMeta{Name: "cpp-example"}, + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + AddCPPClaimMetadata(tt.obj, tt.policyID, tt.policyMeta) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestCleanupPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + result metav1.Object + }{ + { + name: "clean up policy claim metadata", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + 
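// Cleanup is expected to strip exactly the PropagationPolicy claim keys (the
// permanent-ID label plus the policy namespace/name annotations) and leave
// empty, non-nil maps behind, as the expected result below shows.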
result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + "annotations": map[string]interface{}{}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + CleanupPPClaimMetadata(tt.obj) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestCleanupCPPClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + result metav1.Object + }{ + { + name: "clean up policy claim metadata", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + "annotations": map[string]interface{}{}, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + CleanupCPPClaimMetadata(tt.obj) + assert.Equal(t, tt.obj, tt.result) + }) + } +} + +func TestNeedCleanupClaimMetadata(t *testing.T) { + tests := []struct { + name string + obj metav1.Object + targetClaimMetadata map[string]string + needCleanup bool + }{ + { + name: "need cleanup", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "cpp-example"}, + }, + }, + }, + targetClaimMetadata: map[string]string{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + needCleanup: true, + }, + { + name: "no need cleanup", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.PropagationPolicyPermanentIDLabel: "b0907cgb-f3f3-4a4b-b289-5691a4fef979"}, + "annotations": map[string]interface{}{policyv1alpha1.PropagationPolicyNamespaceAnnotation: "default", policyv1alpha1.PropagationPolicyNameAnnotation: "pp-example"}, + }, + }, + }, + targetClaimMetadata: map[string]string{policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979"}, + needCleanup: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.needCleanup, NeedCleanupClaimMetadata(tt.obj, tt.targetClaimMetadata)) + }) + } +} diff --git a/pkg/detector/detector.go b/pkg/detector/detector.go index eaf682901bfa..aacac44ae78e 100644 --- a/pkg/detector/detector.go +++ b/pkg/detector/detector.go @@ -62,22 +62,6 @@ import ( "github.com/karmada-io/karmada/pkg/util/restmapper" ) -var ( - propagationPolicyMarkedLabels = []string{ - policyv1alpha1.PropagationPolicyPermanentIDLabel, - } - propagationPolicyMarkedAnnotations = []string{ - policyv1alpha1.PropagationPolicyNamespaceAnnotation, - policyv1alpha1.PropagationPolicyNameAnnotation, - } - clusterPropagationPolicyMarkedLabels = []string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, - } - clusterPropagationPolicyMarkedAnnotations = []string{ - policyv1alpha1.ClusterPropagationPolicyAnnotation, - } -) - // ResourceDetector is a 
resource watcher which watches all resources and reconcile the events. type ResourceDetector struct { // DiscoveryClientSet is used to resource discovery. @@ -464,15 +448,7 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object return nil } - policyLabels := map[string]string{ - policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID, - } - policyAnnotations := map[string]string{ - policyv1alpha1.PropagationPolicyNamespaceAnnotation: policy.GetNamespace(), - policyv1alpha1.PropagationPolicyNameAnnotation: policy.GetName(), - } - - binding, err := d.BuildResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta, AddPPClaimMetadata) if err != nil { klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err) return err @@ -501,7 +477,8 @@ func (d *ResourceDetector) ApplyPolicy(object *unstructured.Unstructured, object bindingCopy.Spec.Failover = binding.Spec.Failover bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution bindingCopy.Spec.Suspension = binding.Spec.Suspension - excludeClusterPolicy(bindingCopy.Labels) + bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion + excludeClusterPolicy(bindingCopy) return nil }) if err != nil { @@ -556,18 +533,11 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, return nil } - policyLabels := map[string]string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID, - } - policyAnnotations := map[string]string{ - policyv1alpha1.ClusterPropagationPolicyAnnotation: policy.GetName(), - } - // Build `ResourceBinding` or `ClusterResourceBinding` according to the resource template's scope. // For namespace-scoped resources, which namespace is not empty, building `ResourceBinding`. // For cluster-scoped resources, which namespace is empty, building `ClusterResourceBinding`. if object.GetNamespace() != "" { - binding, err := d.BuildResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta, AddCPPClaimMetadata) if err != nil { klog.Errorf("Failed to build resourceBinding for object: %s. error: %v", objectKey, err) return err @@ -596,6 +566,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, bindingCopy.Spec.Failover = binding.Spec.Failover bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution bindingCopy.Spec.Suspension = binding.Spec.Suspension + bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion return nil }) return err @@ -614,7 +585,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, klog.V(2).Infof("ResourceBinding(%s) is up to date.", binding.GetName()) } } else { - binding, err := d.BuildClusterResourceBinding(object, policyLabels, policyAnnotations, &policy.Spec) + binding, err := d.BuildClusterResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta) if err != nil { klog.Errorf("Failed to build clusterResourceBinding for object: %s. 
error: %v", objectKey, err) return err @@ -642,6 +613,7 @@ func (d *ResourceDetector) ApplyClusterPolicy(object *unstructured.Unstructured, bindingCopy.Spec.Failover = binding.Spec.Failover bindingCopy.Spec.ConflictResolution = binding.Spec.ConflictResolution bindingCopy.Spec.Suspension = binding.Spec.Suspension + bindingCopy.Spec.PreserveResourcesOnDeletion = binding.Spec.PreserveResourcesOnDeletion return nil }) return err @@ -705,28 +677,16 @@ func (d *ResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructure policyID := policy.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel] objLabels := object.GetLabels() - if objLabels == nil { - objLabels = make(map[string]string) - } else if len(objLabels) > 0 { + if len(objLabels) > 0 { // object has been claimed, don't need to claim again - if !excludeClusterPolicy(objLabels) && + if !excludeClusterPolicy(object) && objLabels[policyv1alpha1.PropagationPolicyPermanentIDLabel] == policyID { return policyID, nil } } - objLabels[policyv1alpha1.PropagationPolicyPermanentIDLabel] = policyID - - objectAnnotations := object.GetAnnotations() - if objectAnnotations == nil { - objectAnnotations = make(map[string]string) - } - objectAnnotations[policyv1alpha1.PropagationPolicyNamespaceAnnotation] = policy.Namespace - objectAnnotations[policyv1alpha1.PropagationPolicyNameAnnotation] = policy.Name - objectCopy := object.DeepCopy() - objectCopy.SetLabels(objLabels) - objectCopy.SetAnnotations(objectAnnotations) + AddPPClaimMetadata(objectCopy, policyID, policy.ObjectMeta) return policyID, d.Client.Update(context.TODO(), objectCopy) } @@ -741,15 +701,13 @@ func (d *ResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unst } objectCopy := object.DeepCopy() - util.MergeLabel(objectCopy, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel, policyID) + AddCPPClaimMetadata(objectCopy, policyID, policy.ObjectMeta) - util.MergeAnnotation(objectCopy, policyv1alpha1.ClusterPropagationPolicyAnnotation, policy.Name) return policyID, d.Client.Update(context.TODO(), objectCopy) } // BuildResourceBinding builds a desired ResourceBinding for object. 
-func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, - labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) { +func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, policySpec *policyv1alpha1.PropagationSpec, policyID string, policyMeta metav1.ObjectMeta, claimFunc func(object metav1.Object, policyId string, objectMeta metav1.ObjectMeta)) (*workv1alpha2.ResourceBinding, error) { bindingName := names.GenerateBindingName(object.GetKind(), object.GetName()) propagationBinding := &workv1alpha2.ResourceBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -758,17 +716,16 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(object, object.GroupVersionKind()), }, - Annotations: annotations, - Labels: labels, - Finalizers: []string{util.BindingControllerFinalizer}, + Finalizers: []string{util.BindingControllerFinalizer}, }, Spec: workv1alpha2.ResourceBindingSpec{ - PropagateDeps: policySpec.PropagateDeps, - SchedulerName: policySpec.SchedulerName, - Placement: &policySpec.Placement, - Failover: policySpec.Failover, - ConflictResolution: policySpec.ConflictResolution, - Suspension: policySpec.Suspension, + PropagateDeps: policySpec.PropagateDeps, + SchedulerName: policySpec.SchedulerName, + Placement: &policySpec.Placement, + Failover: policySpec.Failover, + ConflictResolution: policySpec.ConflictResolution, + Suspension: policySpec.Suspension, + PreserveResourcesOnDeletion: policySpec.PreserveResourcesOnDeletion, Resource: workv1alpha2.ObjectReference{ APIVersion: object.GetAPIVersion(), Kind: object.GetKind(), @@ -779,6 +736,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure }, }, } + claimFunc(propagationBinding, policyID, policyMeta) if d.ResourceInterpreter.HookEnabled(object.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretReplica) { replicas, replicaRequirements, err := d.ResourceInterpreter.GetReplicas(object) @@ -795,7 +753,7 @@ func (d *ResourceDetector) BuildResourceBinding(object *unstructured.Unstructure // BuildClusterResourceBinding builds a desired ClusterResourceBinding for object. 
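// No claimFunc parameter is needed for the cluster-scoped variant: it always
// stamps ClusterPropagationPolicy claim metadata itself via AddCPPClaimMetadata
// below, and ApplyClusterPolicy calls it as:
//
//	binding, err := d.BuildClusterResourceBinding(object, &policy.Spec, policyID, policy.ObjectMeta)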
func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, - labels, annotations map[string]string, policySpec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) { + policySpec *policyv1alpha1.PropagationSpec, policyID string, policyMeta metav1.ObjectMeta) (*workv1alpha2.ClusterResourceBinding, error) { bindingName := names.GenerateBindingName(object.GetKind(), object.GetName()) binding := &workv1alpha2.ClusterResourceBinding{ ObjectMeta: metav1.ObjectMeta{ @@ -803,17 +761,16 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst OwnerReferences: []metav1.OwnerReference{ *metav1.NewControllerRef(object, object.GroupVersionKind()), }, - Annotations: annotations, - Labels: labels, - Finalizers: []string{util.ClusterResourceBindingControllerFinalizer}, + Finalizers: []string{util.ClusterResourceBindingControllerFinalizer}, }, Spec: workv1alpha2.ResourceBindingSpec{ - PropagateDeps: policySpec.PropagateDeps, - SchedulerName: policySpec.SchedulerName, - Placement: &policySpec.Placement, - Failover: policySpec.Failover, - ConflictResolution: policySpec.ConflictResolution, - Suspension: policySpec.Suspension, + PropagateDeps: policySpec.PropagateDeps, + SchedulerName: policySpec.SchedulerName, + Placement: &policySpec.Placement, + Failover: policySpec.Failover, + ConflictResolution: policySpec.ConflictResolution, + Suspension: policySpec.Suspension, + PreserveResourcesOnDeletion: policySpec.PreserveResourcesOnDeletion, Resource: workv1alpha2.ObjectReference{ APIVersion: object.GetAPIVersion(), Kind: object.GetKind(), @@ -824,6 +781,8 @@ func (d *ResourceDetector) BuildClusterResourceBinding(object *unstructured.Unst }, } + AddCPPClaimMetadata(binding, policyID, policyMeta) + if d.ResourceInterpreter.HookEnabled(object.GroupVersionKind(), configv1alpha1.InterpreterOperationInterpretReplica) { replicas, replicaRequirements, err := d.ResourceInterpreter.GetReplicas(object) if err != nil { @@ -1092,37 +1051,34 @@ func (d *ResourceDetector) ReconcileClusterPropagationPolicy(key util.QueueKey) } // HandlePropagationPolicyDeletion handles PropagationPolicy delete event. -// After a policy is removed, the label and annotations marked on relevant resource template will be removed (which gives +// After a policy is removed, the label and annotations claimed on relevant resource template will be removed (which gives // the resource template a chance to match another policy). // // Note: The relevant ResourceBinding will continue to exist until the resource template is gone. func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID string) error { - rbs, err := helper.GetResourceBindings(d.Client, labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID}) + claimMetadata := labels.Set{policyv1alpha1.PropagationPolicyPermanentIDLabel: policyID} + rbs, err := helper.GetResourceBindings(d.Client, claimMetadata) if err != nil { klog.Errorf("Failed to list propagation bindings with policy permanentID(%s): %v", policyID, err) return err } - cleanupMarksFunc := func(obj metav1.Object) { - util.RemoveLabels(obj, propagationPolicyMarkedLabels...) - util.RemoveAnnotations(obj, propagationPolicyMarkedAnnotations...) 
- } var errs []error for index, binding := range rbs.Items { - // Must remove the marks, such as labels and annotations, from the resource template ahead of ResourceBinding, - // otherwise might lose the chance to do that in a retry loop (in particular, the marks was successfully removed + // Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding, + // otherwise might lose the chance to do that in a retry loop (in particular, the claim metadata was successfully removed // from ResourceBinding, but not from the resource template), since the ResourceBinding will not be listed again. - if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFunc); err != nil { - klog.Errorf("Failed to clean up marks from resource(%s-%s/%s) when propagationPolicy removed, error: %v", + if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, claimMetadata, CleanupPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when propagationPolicy removed, error: %v", binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err) errs = append(errs, err) // Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop. continue } - // Clean up the marks from the reference binding so that the karmada scheduler won't reschedule the binding. - if err := d.CleanupResourceBindingMarks(&rbs.Items[index], cleanupMarksFunc); err != nil { - klog.Errorf("Failed to clean up marks from resource binding(%s/%s) when propagationPolicy removed, error: %v", + // Clean up the claim metadata from the reference binding so that the karmada scheduler won't reschedule the binding. + if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], claimMetadata, CleanupPPClaimMetadata); err != nil { + klog.Errorf("Failed to clean up claim metadata from resource binding(%s/%s) when propagationPolicy removed, error: %v", binding.Namespace, binding.Name, err) errs = append(errs, err) } @@ -1131,7 +1087,7 @@ func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID erro } // HandleClusterPropagationPolicyDeletion handles ClusterPropagationPolicy delete event. -// After a policy is removed, the label and annotation marked on relevant resource template will be removed (which gives +// After a policy is removed, the label and annotation claimed on relevant resource template will be removed (which gives // the resource template a chance to match another policy). // // Note: The relevant ClusterResourceBinding or ResourceBinding will continue to exist until the resource template is gone. @@ -1141,11 +1097,6 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID, } - cleanupMarksFun := func(obj metav1.Object) { - util.RemoveLabels(obj, clusterPropagationPolicyMarkedLabels...) - util.RemoveAnnotations(obj, clusterPropagationPolicyMarkedAnnotations...) 
@@ -1131,7 +1087,7 @@ func (d *ResourceDetector) HandlePropagationPolicyDeletion(policyID strin
 }
 
 // HandleClusterPropagationPolicyDeletion handles ClusterPropagationPolicy delete event.
-// After a policy is removed, the label and annotation marked on relevant resource template will be removed (which gives
+// After a policy is removed, the labels and annotations claimed on the relevant resource template will be removed (which gives
 // the resource template a chance to match another policy).
 //
 // Note: The relevant ClusterResourceBinding or ResourceBinding will continue to exist until the resource template is gone.
@@ -1141,11 +1097,6 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin
 		policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID,
 	}
 
-	cleanupMarksFun := func(obj metav1.Object) {
-		util.RemoveLabels(obj, clusterPropagationPolicyMarkedLabels...)
-		util.RemoveAnnotations(obj, clusterPropagationPolicyMarkedAnnotations...)
-	}
-
 	// load the ClusterResourceBindings which are labeled with the current policy
 	crbs, err := helper.GetClusterResourceBindings(d.Client, labelSet)
 	if err != nil {
@@ -1153,20 +1104,20 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin
 		errs = append(errs, err)
 	} else if len(crbs.Items) > 0 {
 		for index, binding := range crbs.Items {
-			// Must remove the marks, such as labels and annotations, from the resource template ahead of
+			// Must remove the claim metadata, such as labels and annotations, from the resource template ahead of
 			// ClusterResourceBinding, otherwise might lose the chance to do that in a retry loop (in particular, the
-			// marks was successfully removed from ClusterResourceBinding, but resource template not), since the
+			// claim metadata was successfully removed from ClusterResourceBinding, but not from the resource template), since the
 			// ClusterResourceBinding will not be listed again.
-			if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFun); err != nil {
-				klog.Errorf("Failed to clean up marks from resource(%s-%s) when clusterPropagationPolicy removed, error: %v",
+			if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil {
+				klog.Errorf("Failed to clean up claim metadata from resource(%s-%s) when clusterPropagationPolicy removed, error: %v",
 					binding.Spec.Resource.Kind, binding.Spec.Resource.Name, err)
 				// Skip cleaning up policy labels and annotations from ClusterResourceBinding, give a chance to do that in a retry loop.
 				continue
 			}
 
-			// Clean up the marks from the reference binding so that the Karmada scheduler won't reschedule the binding.
-			if err := d.CleanupClusterResourceBindingMarks(&crbs.Items[index], cleanupMarksFun); err != nil {
-				klog.Errorf("Failed to clean up marks from clusterResourceBinding(%s) when clusterPropagationPolicy removed, error: %v",
+			// Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding.
+			if err := d.CleanupClusterResourceBindingClaimMetadata(&crbs.Items[index], labelSet); err != nil {
+				klog.Errorf("Failed to clean up claim metadata from clusterResourceBinding(%s) when clusterPropagationPolicy removed, error: %v",
 					binding.Name, err)
 				errs = append(errs, err)
 			}
@@ -1180,20 +1131,20 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin
 		errs = append(errs, err)
 	} else if len(rbs.Items) > 0 {
 		for index, binding := range rbs.Items {
-			// Must remove the marks, such as labels and annotations, from the resource template ahead of ResourceBinding,
+			// Must remove the claim metadata, such as labels and annotations, from the resource template ahead of ResourceBinding,
 			// otherwise might lose the chance to do that in a retry loop (in particular, the label was successfully
 			// removed from ResourceBinding, but not from the resource template), since the ResourceBinding will not be listed again.
-			if err := d.CleanupResourceTemplateMarks(binding.Spec.Resource, cleanupMarksFun); err != nil {
-				klog.Errorf("Failed to clean up marks from resource(%s-%s/%s) when clusterPropagationPolicy removed, error: %v",
+			if err := d.CleanupResourceTemplateClaimMetadata(binding.Spec.Resource, labelSet, CleanupCPPClaimMetadata); err != nil {
+				klog.Errorf("Failed to clean up claim metadata from resource(%s-%s/%s) when clusterPropagationPolicy removed, error: %v",
 					binding.Spec.Resource.Kind, binding.Spec.Resource.Namespace, binding.Spec.Resource.Name, err)
 				errs = append(errs, err)
 				// Skip cleaning up policy labels and annotations from ResourceBinding, give a chance to do that in a retry loop.
 				continue
 			}
 
-			// Clean up the marks from the reference binding so that the Karmada scheduler won't reschedule the binding.
-			if err := d.CleanupResourceBindingMarks(&rbs.Items[index], cleanupMarksFun); err != nil {
-				klog.Errorf("Failed to clean up marks from resourceBinding(%s/%s) when clusterPropagationPolicy removed, error: %v",
+			// Clean up the claim metadata from the reference binding so that the Karmada scheduler won't reschedule the binding.
+			if err := d.CleanupResourceBindingClaimMetadata(&rbs.Items[index], labelSet, CleanupCPPClaimMetadata); err != nil {
+				klog.Errorf("Failed to clean up claim metadata from resourceBinding(%s/%s) when clusterPropagationPolicy removed, error: %v",
 					binding.Namespace, binding.Name, err)
 				errs = append(errs, err)
 			}
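// Both deletion handlers locate the affected bindings through a label selector
// on the policy's permanent-ID label. A minimal, hypothetical sketch of that
// lookup with a controller-runtime client (the real code goes through
// helper.GetResourceBindings / helper.GetClusterResourceBindings):
func listBindingsByClaimSketch(c client.Client, policyID string) (*workv1alpha2.ResourceBindingList, error) {
	rbs := &workv1alpha2.ResourceBindingList{}
	// Only bindings still claimed by the deleted policy are returned.
	err := c.List(context.TODO(), rbs, client.MatchingLabels{
		policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: policyID,
	})
	return rbs, err
}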
@@ -1209,7 +1160,7 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyDeletion(policyID strin
 // from waiting list and throw the object to its reconcile queue. If not, do nothing.
 // Finally, handle the propagation policy preemption process if preemption is enabled.
 func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *policyv1alpha1.PropagationPolicy) error {
-	// If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label marked
+	// If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label claimed
 	// on the relevant resource template will be removed (which gives the resource template a chance to match another policy).
 	policyID := policy.Labels[policyv1alpha1.PropagationPolicyPermanentIDLabel]
 	err := d.cleanPPUnmatchedRBs(policyID, policy.Namespace, policy.Name, policy.Spec.ResourceSelectors)
@@ -1251,7 +1202,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(policy *polic
 	}
 
 	// If preemption is enabled, handle the preemption process.
-	// If this policy succeeds in preempting resource managed by other policy, the label marked
+	// If this policy succeeds in preempting a resource managed by another policy, the label claimed
 	// will be replaced, which gives the resource template a chance to match this policy.
 	if preemptionEnabled(policy.Spec.Preemption) {
 		return d.handlePropagationPolicyPreemption(policy)
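// preemptionEnabled is not part of this diff; presumably it is a one-line guard
// on the declared preemption behavior. A hedged sketch:
func preemptionEnabledSketch(behavior policyv1alpha1.PreemptionBehavior) bool {
	// Preempt only when the policy explicitly opts in.
	return behavior == policyv1alpha1.PreemptAlways
}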
@@ -1267,7 +1218,7 @@ func (d *ResourceDetector) HandlePropagationPolicyCreationOrUpdate(polic
 }
 
 // from waiting list and throw the object to its reconcile queue. If not, do nothing.
 // Finally, handle the cluster propagation policy preemption process if preemption is enabled.
 func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy *policyv1alpha1.ClusterPropagationPolicy) error {
-	// If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label marked
+	// If the Policy's ResourceSelectors change, causing certain resources to no longer match the Policy, the label claimed
 	// on the relevant resource template will be removed (which gives the resource template a chance to match another policy).
 	policyID := policy.Labels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]
 	err := d.cleanCPPUnmatchedRBs(policyID, policy.Name, policy.Spec.ResourceSelectors)
@@ -1324,7 +1275,7 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy
 	}
 
 	// If preemption is enabled, handle the preemption process.
-	// If this policy succeeds in preempting resource managed by other policy, the label marked
+	// If this policy succeeds in preempting a resource managed by another policy, the label claimed
 	// will be replaced, which gives the resource template a chance to match this policy.
 	if preemptionEnabled(policy.Spec.Preemption) {
 		return d.handleClusterPropagationPolicyPreemption(policy)
@@ -1333,8 +1284,8 @@ func (d *ResourceDetector) HandleClusterPropagationPolicyCreationOrUpdate(policy
 	return nil
 }
 
-// CleanupResourceTemplateMarks removes marks, such as labels and annotations, from object referencing by objRef.
-func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.ObjectReference, cleanupFunc func(obj metav1.Object)) error {
+// CleanupResourceTemplateClaimMetadata removes claim metadata, such as labels and annotations, from the object referenced by objRef.
+func (d *ResourceDetector) CleanupResourceTemplateClaimMetadata(objRef workv1alpha2.ObjectReference, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
 	gvr, err := restmapper.GetGroupVersionResource(d.RESTMapper, schema.FromAPIVersionAndKind(objRef.APIVersion, objRef.Kind))
 	if err != nil {
 		klog.Errorf("Failed to convert GVR from GVK(%s/%s), err: %v", objRef.APIVersion, objRef.Kind, err)
@@ -1352,6 +1303,11 @@ func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.Obje
 			return err
 		}
 
+		if !NeedCleanupClaimMetadata(workload, targetClaimMetadata) {
+			klog.Infof("No need to clean up the claim metadata on resource(kind=%s, %s/%s) since it has changed", workload.GetKind(), workload.GetNamespace(), workload.GetName())
+			return nil
+		}
+
 		cleanupFunc(workload)
 
 		_, err = d.DynamicClient.Resource(gvr).Namespace(workload.GetNamespace()).Update(context.TODO(), workload, metav1.UpdateOptions{})
@@ -1364,9 +1320,13 @@ func (d *ResourceDetector) CleanupResourceTemplateMarks(objRef workv1alpha2.Obje
 	})
 }
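// NeedCleanupClaimMetadata is introduced by this PR but defined outside the
// shown hunks. A plausible sketch, assuming it reports whether the object still
// carries every target claim label with the same value, i.e. the claim has not
// been taken over by another policy in the meantime:
func needCleanupClaimMetadataSketch(obj metav1.Object, targetClaimMetadata map[string]string) bool {
	for key, value := range targetClaimMetadata {
		if obj.GetLabels()[key] != value {
			// The object is now claimed by someone else; removing the
			// metadata here would clobber the new claim.
			return false
		}
	}
	return true
}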
-// CleanupResourceBindingMarks removes marks, such as labels and annotations, from resource binding.
-func (d *ResourceDetector) CleanupResourceBindingMarks(rb *workv1alpha2.ResourceBinding, cleanupFunc func(obj metav1.Object)) error {
+// CleanupResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from the resource binding.
+func (d *ResourceDetector) CleanupResourceBindingClaimMetadata(rb *workv1alpha2.ResourceBinding, targetClaimMetadata map[string]string, cleanupFunc func(obj metav1.Object)) error {
 	return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
+		if !NeedCleanupClaimMetadata(rb, targetClaimMetadata) {
+			klog.Infof("No need to clean up the claim metadata on ResourceBinding(%s/%s) since it has changed", rb.GetNamespace(), rb.GetName())
+			return nil
+		}
 		cleanupFunc(rb)
 		updateErr := d.Client.Update(context.TODO(), rb)
 		if updateErr == nil {
@@ -1377,16 +1337,20 @@ func (d *ResourceDetector) CleanupResourceBindingMarks(rb *workv1alpha2.Resource
 		if err = d.Client.Get(context.TODO(), client.ObjectKey{Namespace: rb.GetNamespace(), Name: rb.GetName()}, updated); err == nil {
 			rb = updated.DeepCopy()
 		} else {
-			klog.Errorf("Failed to get updated resource binding %s/%s: %v", rb.GetNamespace(), rb.GetName(), err)
+			klog.Errorf("Failed to get updated ResourceBinding(%s/%s): %v", rb.GetNamespace(), rb.GetName(), err)
 		}
 		return updateErr
 	})
 }
 
-// CleanupClusterResourceBindingMarks removes marks, such as labels and annotations, from cluster resource binding.
-func (d *ResourceDetector) CleanupClusterResourceBindingMarks(crb *workv1alpha2.ClusterResourceBinding, cleanupFunc func(obj metav1.Object)) error {
+// CleanupClusterResourceBindingClaimMetadata removes claim metadata, such as labels and annotations, from the cluster resource binding.
+func (d *ResourceDetector) CleanupClusterResourceBindingClaimMetadata(crb *workv1alpha2.ClusterResourceBinding, targetClaimMetadata map[string]string) error {
 	return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
-		cleanupFunc(crb)
+		if !NeedCleanupClaimMetadata(crb, targetClaimMetadata) {
+			klog.Infof("No need to clean up the claim metadata on ClusterResourceBinding(%s) since it has changed", crb.GetName())
+			return nil
+		}
+		CleanupCPPClaimMetadata(crb)
 		updateErr := d.Client.Update(context.TODO(), crb)
 		if updateErr == nil {
 			return nil
@@ -1396,7 +1360,7 @@ func (d *ResourceDetector) CleanupClusterResourceBindingMarks(crb *workv1alpha2.
 		if err = d.Client.Get(context.TODO(), client.ObjectKey{Name: crb.GetName()}, updated); err == nil {
 			crb = updated.DeepCopy()
 		} else {
-			klog.Errorf("Failed to get updated cluster resource binding %s: %v", crb.GetName(), err)
+			klog.Errorf("Failed to get updated ClusterResourceBinding(%s): %v", crb.GetName(), err)
 		}
 		return updateErr
 	})
}
diff --git a/pkg/detector/detector_test.go b/pkg/detector/detector_test.go
index e576aae64429..10f77fa06343 100644
--- a/pkg/detector/detector_test.go
+++ b/pkg/detector/detector_test.go
@@ -17,10 +17,32 @@ limitations under the License.
package detector import ( + "context" + "fmt" "regexp" + "strings" "testing" + "time" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + dynamicfake "k8s.io/client-go/dynamic/fake" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/fedinformer/keys" ) func BenchmarkEventFilterNoSkipNameSpaces(b *testing.B) { @@ -158,3 +180,1098 @@ func BenchmarkEventFilterExtensionApiserverAuthentication(b *testing.B) { }) } } + +func TestGVRDisabled(t *testing.T) { + tests := []struct { + name string + gvr schema.GroupVersionResource + config *util.SkippedResourceConfig + expected bool + }{ + { + name: "GVR not disabled", + gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + config: &util.SkippedResourceConfig{}, + expected: false, + }, + { + name: "GVR disabled by group", + gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + config: &util.SkippedResourceConfig{Groups: map[string]struct{}{"apps": {}}}, + expected: true, + }, + { + name: "GVR disabled by group version", + gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + config: &util.SkippedResourceConfig{GroupVersions: map[schema.GroupVersion]struct{}{{Group: "apps", Version: "v1"}: {}}}, + expected: true, + }, + { + name: "SkippedResourceConfig is nil", + gvr: schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, + config: nil, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &ResourceDetector{ + SkippedResourceConfig: tt.config, + RESTMapper: &mockRESTMapper{}, + } + result := d.gvrDisabled(tt.gvr) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestNeedLeaderElection(t *testing.T) { + tests := []struct { + name string + want bool + }{ + { + name: "NeedLeaderElection always returns true", + want: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &ResourceDetector{} + got := d.NeedLeaderElection() + assert.Equal(t, tt.want, got, "NeedLeaderElection() = %v, want %v", got, tt.want) + }) + } +} + +func TestEventFilter(t *testing.T) { + tests := []struct { + name string + obj *unstructured.Unstructured + skippedPropagatingNamespaces []*regexp.Regexp + expected bool + }{ + { + name: "object in karmada-system namespace", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "namespace": "karmada-system", + "name": "test-obj", + }, + }, + }, + expected: false, + }, + { + name: "object in karmada-cluster namespace", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "namespace": "karmada-cluster", + "name": "test-obj", + }, + }, + }, + expected: 
false, + }, + { + name: "object in karmada-es-* namespace", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "namespace": "karmada-es-test", + "name": "test-obj", + }, + }, + }, + expected: false, + }, + { + name: "object in skipped namespace", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "namespace": "kube-system", + "name": "test-obj", + }, + }, + }, + skippedPropagatingNamespaces: []*regexp.Regexp{regexp.MustCompile("kube-.*")}, + expected: false, + }, + { + name: "object in non-skipped namespace", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": map[string]interface{}{ + "namespace": "default", + "name": "test-obj", + }, + }, + }, + expected: true, + }, + { + name: "extension-apiserver-authentication configmap in kube-system", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": map[string]interface{}{ + "namespace": "kube-system", + "name": "extension-apiserver-authentication", + }, + }, + }, + expected: false, + }, + { + name: "cluster-scoped resource", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "Node", + "metadata": map[string]interface{}{ + "name": "test-node", + }, + }, + }, + expected: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &ResourceDetector{ + SkippedPropagatingNamespaces: tt.skippedPropagatingNamespaces, + } + result := d.EventFilter(tt.obj) + assert.Equal(t, tt.expected, result, "For test case: %s", tt.name) + }) + } +} + +func TestOnAdd(t *testing.T) { + tests := []struct { + name string + obj interface{} + expectedEnqueue bool + }{ + { + name: "valid unstructured object", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + }, + }, + expectedEnqueue: true, + }, + { + name: "invalid unstructured object", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + expectedEnqueue: true, // The function doesn't check for validity, so it will still enqueue + }, + { + name: "non-runtime object", + obj: "not a runtime.Object", + expectedEnqueue: false, + }, + { + name: "core v1 object", + obj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + }, + expectedEnqueue: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockProcessor := &mockAsyncWorker{} + d := &ResourceDetector{ + Processor: mockProcessor, + } + d.OnAdd(tt.obj) + if tt.expectedEnqueue { + assert.Equal(t, 1, mockProcessor.enqueueCount, "Object should be enqueued") + assert.IsType(t, ResourceItem{}, mockProcessor.lastEnqueued, "Enqueued item should be of type ResourceItem") + enqueued := mockProcessor.lastEnqueued.(ResourceItem) + assert.Equal(t, tt.obj, enqueued.Obj, "Enqueued object should match the input object") + } else { + assert.Equal(t, 0, mockProcessor.enqueueCount, "Object should not be enqueued") + } + }) + } +} + +func TestOnUpdate(t *testing.T) { + tests := []struct { + name string + oldObj interface{} + newObj interface{} + expectedEnqueue bool + expectedChangeByKarmada bool + 
expectToUnstructuredError bool + }{ + { + name: "valid update with changes", + oldObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + }, + }, + newObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(2), + }, + }, + }, + expectedEnqueue: true, + expectedChangeByKarmada: false, + }, + { + name: "update without changes", + oldObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + }, + }, + newObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + "spec": map[string]interface{}{ + "replicas": int64(1), + }, + }, + }, + expectedEnqueue: false, + }, + { + name: "invalid object", + oldObj: "not a runtime.Object", + newObj: "not a runtime.Object", + expectedEnqueue: false, + }, + { + name: "change by Karmada", + oldObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + }, + }, + newObj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + "annotations": map[string]interface{}{ + util.PolicyPlacementAnnotation: "test", + }, + }, + }, + }, + expectedEnqueue: true, + expectedChangeByKarmada: true, + }, + { + name: "core v1 object", + oldObj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + }, + newObj: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-pod", + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}, {Name: "container2"}}, + }, + }, + expectedEnqueue: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockProcessor := &mockAsyncWorker{} + d := &ResourceDetector{ + Processor: mockProcessor, + } + + d.OnUpdate(tt.oldObj, tt.newObj) + + if tt.expectedEnqueue { + assert.Equal(t, 1, mockProcessor.enqueueCount, "Object should be enqueued") + assert.IsType(t, ResourceItem{}, mockProcessor.lastEnqueued, "Enqueued item should be of type ResourceItem") + enqueued := mockProcessor.lastEnqueued.(ResourceItem) + assert.Equal(t, tt.newObj, enqueued.Obj, "Enqueued object should match the new object") + assert.Equal(t, tt.expectedChangeByKarmada, enqueued.ResourceChangeByKarmada, "ResourceChangeByKarmada flag should match expected value") + } else { + assert.Equal(t, 0, mockProcessor.enqueueCount, "Object should not be enqueued") + } + }) + } +} + +func TestOnDelete(t *testing.T) { + tests := []struct { + name string + obj runtime.Object + 
expectedEnqueue bool + }{ + { + name: "valid object", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + }, + }, + expectedEnqueue: true, + }, + { + name: "invalid object", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{}, + }, + expectedEnqueue: true, // The function doesn't check for validity, so it will still enqueue + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockProcessor := &mockAsyncWorker{} + d := &ResourceDetector{ + Processor: mockProcessor, + } + + d.OnDelete(tt.obj) + + if tt.expectedEnqueue { + assert.Equal(t, 1, mockProcessor.enqueueCount, "Object should be enqueued") + } else { + assert.Equal(t, 0, mockProcessor.enqueueCount, "Object should not be enqueued") + } + }) + } +} + +func TestLookForMatchedPolicy(t *testing.T) { + tests := []struct { + name string + object *unstructured.Unstructured + policies []*policyv1alpha1.PropagationPolicy + expectedPolicy *policyv1alpha1.PropagationPolicy + }{ + { + name: "matching policy found", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + }, + }, + policies: []*policyv1alpha1.PropagationPolicy{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "policy-1", + Namespace: "default", + }, + Spec: policyv1alpha1.PropagationSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + }, + }, + }, + expectedPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "policy-1", + Namespace: "default", + }, + Spec: policyv1alpha1.PropagationSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme) + + d := &ResourceDetector{ + DynamicClient: fakeClient, + propagationPolicyLister: &mockPropagationPolicyLister{ + policies: tt.policies, + }, + } + + objectKey := keys.ClusterWideKey{ + Name: tt.object.GetName(), + Namespace: tt.object.GetNamespace(), + Kind: tt.object.GetKind(), + } + + policy, err := d.LookForMatchedPolicy(tt.object, objectKey) + + if err != nil { + t.Errorf("LookForMatchedPolicy returned an error: %v", err) + } + + fmt.Printf("Returned policy: %+v\n", policy) + + if tt.expectedPolicy == nil { + assert.Nil(t, policy) + } else { + assert.NotNil(t, policy) + if policy != nil { + assert.Equal(t, tt.expectedPolicy.Name, policy.Name) + assert.Equal(t, tt.expectedPolicy.Namespace, policy.Namespace) + } + } + }) + } +} + +func TestLookForMatchedClusterPolicy(t *testing.T) { + tests := []struct { + name string + object *unstructured.Unstructured + policies []*policyv1alpha1.ClusterPropagationPolicy + expectedPolicy *policyv1alpha1.ClusterPropagationPolicy + }{ + { + name: "matching cluster policy found", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + }, + }, + }, + policies: []*policyv1alpha1.ClusterPropagationPolicy{ + { + ObjectMeta: metav1.ObjectMeta{ + 
Name: "cluster-policy-1", + }, + Spec: policyv1alpha1.PropagationSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + }, + }, + }, + expectedPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-policy-1", + }, + Spec: policyv1alpha1.PropagationSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + fakeClient := dynamicfake.NewSimpleDynamicClient(scheme) + + d := &ResourceDetector{ + DynamicClient: fakeClient, + clusterPropagationPolicyLister: &mockClusterPropagationPolicyLister{ + policies: tt.policies, + }, + } + + objectKey := keys.ClusterWideKey{ + Name: tt.object.GetName(), + Namespace: tt.object.GetNamespace(), + Kind: tt.object.GetKind(), + } + + policy, err := d.LookForMatchedClusterPolicy(tt.object, objectKey) + + if err != nil { + t.Errorf("LookForMatchedClusterPolicy returned an error: %v", err) + } + + fmt.Printf("Returned cluster policy: %+v\n", policy) + + if tt.expectedPolicy == nil { + assert.Nil(t, policy) + } else { + assert.NotNil(t, policy) + if policy != nil { + assert.Equal(t, tt.expectedPolicy.Name, policy.Name) + } + } + }) + } +} + +func TestApplyPolicy(t *testing.T) { + tests := []struct { + name string + object *unstructured.Unstructured + policy *policyv1alpha1.PropagationPolicy + resourceChangeByKarmada bool + expectError bool + }{ + { + name: "basic apply policy", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + "uid": "test-uid", + }, + }, + }, + policy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-policy", + Namespace: "default", + }, + Spec: policyv1alpha1.PropagationSpec{}, + }, + resourceChangeByKarmada: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithRuntimeObjects(tt.object).Build() + fakeRecorder := record.NewFakeRecorder(10) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme) + + mockDetector := &mockResourceDetector{ + ResourceDetector: ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + EventRecorder: fakeRecorder, + ResourceInterpreter: &mockResourceInterpreter{}, + RESTMapper: &mockRESTMapper{}, + }, + mockClaimPolicyForObject: func(_ *unstructured.Unstructured, _ *policyv1alpha1.PropagationPolicy) (string, error) { + return "mocked-policy-id", nil + }, + mockBuildResourceBinding: func(object *unstructured.Unstructured, _, _ map[string]string, _ *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) { + return &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: object.GetName() + "-" + strings.ToLower(object.GetKind()), + Namespace: object.GetNamespace(), + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: object.GetAPIVersion(), + Kind: object.GetKind(), + Name: object.GetName(), + Namespace: object.GetNamespace(), + }, + }, + }, nil + }, + } + + err := mockDetector.ApplyPolicy(tt.object, keys.ClusterWideKey{}, tt.resourceChangeByKarmada, tt.policy) + + if 
tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + // Verify that the ResourceBinding was created + binding := &workv1alpha2.ResourceBinding{} + err = fakeClient.Get(context.TODO(), client.ObjectKey{ + Namespace: tt.object.GetNamespace(), + Name: tt.object.GetName() + "-" + strings.ToLower(tt.object.GetKind()), + }, binding) + assert.NoError(t, err) + assert.Equal(t, tt.object.GetName(), binding.Spec.Resource.Name) + } + }) + } +} +func TestApplyClusterPolicy(t *testing.T) { + tests := []struct { + name string + object *unstructured.Unstructured + policy *policyv1alpha1.ClusterPropagationPolicy + resourceChangeByKarmada bool + expectError bool + }{ + { + name: "apply cluster policy for namespaced resource", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "test-deployment", + "namespace": "default", + "uid": "test-uid", + }, + }, + }, + policy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-policy", + }, + Spec: policyv1alpha1.PropagationSpec{}, + }, + resourceChangeByKarmada: false, + expectError: false, + }, + { + name: "apply cluster policy for cluster-scoped resource", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": map[string]interface{}{ + "name": "test-cluster-role", + "uid": "test-uid", + }, + }, + }, + policy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-policy", + }, + Spec: policyv1alpha1.PropagationSpec{}, + }, + resourceChangeByKarmada: false, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := setupTestScheme() + fakeClient := fake.NewClientBuilder().WithScheme(scheme).Build() + fakeRecorder := record.NewFakeRecorder(10) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme) + + d := &mockResourceDetector{ + ResourceDetector: ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + EventRecorder: fakeRecorder, + ResourceInterpreter: &mockResourceInterpreter{}, + RESTMapper: &mockRESTMapper{}, + }, + mockClaimClusterPolicyForObject: func(_ *unstructured.Unstructured, _ *policyv1alpha1.ClusterPropagationPolicy) (string, error) { + return "mocked-cluster-policy-id", nil + }, + mockBuildResourceBinding: func(object *unstructured.Unstructured, _, _ map[string]string, _ *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) { + binding := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: object.GetName() + "-" + strings.ToLower(object.GetKind()), + Namespace: object.GetNamespace(), + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: object.GetAPIVersion(), + Kind: object.GetKind(), + Name: object.GetName(), + Namespace: object.GetNamespace(), + }, + }, + } + return binding, nil + }, + mockBuildClusterResourceBinding: func(object *unstructured.Unstructured, _, _ map[string]string, _ *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) { + binding := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: object.GetName() + "-" + strings.ToLower(object.GetKind()), + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: object.GetAPIVersion(), + Kind: 
object.GetKind(), + Name: object.GetName(), + }, + }, + } + return binding, nil + }, + } + + err := d.ApplyClusterPolicy(tt.object, keys.ClusterWideKey{}, tt.resourceChangeByKarmada, tt.policy) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + // Check if ResourceBinding or ClusterResourceBinding was created + if tt.object.GetNamespace() != "" { + binding := &workv1alpha2.ResourceBinding{} + err = fakeClient.Get(context.TODO(), client.ObjectKey{ + Namespace: tt.object.GetNamespace(), + Name: tt.object.GetName() + "-" + strings.ToLower(tt.object.GetKind()), + }, binding) + assert.NoError(t, err) + assert.Equal(t, tt.object.GetName(), binding.Spec.Resource.Name) + } else { + binding := &workv1alpha2.ClusterResourceBinding{} + err = fakeClient.Get(context.TODO(), client.ObjectKey{ + Name: tt.object.GetName() + "-" + strings.ToLower(tt.object.GetKind()), + }, binding) + assert.NoError(t, err) + assert.Equal(t, tt.object.GetName(), binding.Spec.Resource.Name) + } + }) + } +} + +//Helper Functions + +// setupTestScheme creates a runtime scheme with necessary types for testing +func setupTestScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = workv1alpha2.Install(scheme) + _ = corev1.AddToScheme(scheme) + return scheme +} + +// Mock implementations + +// mockAsyncWorker is a mock implementation of util.AsyncWorker +type mockAsyncWorker struct { + enqueueCount int + lastEnqueued interface{} +} + +func (m *mockAsyncWorker) Enqueue(item interface{}) { + m.enqueueCount++ + m.lastEnqueued = item +} + +func (m *mockAsyncWorker) Add(_ interface{}) { + m.enqueueCount++ +} +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} + +// mockRESTMapper is a simple mock that satisfies the meta.RESTMapper interface +type mockRESTMapper struct{} + +func (m *mockRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + return schema.GroupVersionKind{Group: resource.Group, Version: resource.Version, Kind: resource.Resource}, nil +} + +func (m *mockRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + gvk, err := m.KindFor(resource) + if err != nil { + return nil, err + } + return []schema.GroupVersionKind{gvk}, nil +} + +func (m *mockRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return input, nil +} + +func (m *mockRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return []schema.GroupVersionResource{input}, nil +} + +func (m *mockRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return &meta.RESTMapping{ + Resource: schema.GroupVersionResource{Group: gk.Group, Version: versions[0], Resource: gk.Kind}, + GroupVersionKind: schema.GroupVersionKind{Group: gk.Group, Version: versions[0], Kind: gk.Kind}, + Scope: meta.RESTScopeNamespace, + }, nil +} + +func (m *mockRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + mapping, err := m.RESTMapping(gk, versions...) 
+ if err != nil { + return nil, err + } + return []*meta.RESTMapping{mapping}, nil +} + +func (m *mockRESTMapper) ResourceSingularizer(resource string) (string, error) { + return resource, nil +} + +// mockResourceDetector is a mock implementation of ResourceDetector +type mockResourceDetector struct { + ResourceDetector + mockClaimPolicyForObject func(object *unstructured.Unstructured, policy *policyv1alpha1.PropagationPolicy) (string, error) + mockClaimClusterPolicyForObject func(object *unstructured.Unstructured, policy *policyv1alpha1.ClusterPropagationPolicy) (string, error) + mockBuildResourceBinding func(object *unstructured.Unstructured, labels, annotations map[string]string, spec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) + mockBuildClusterResourceBinding func(object *unstructured.Unstructured, labels, annotations map[string]string, spec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) +} + +func (m *mockResourceDetector) ClaimPolicyForObject(object *unstructured.Unstructured, policy *policyv1alpha1.PropagationPolicy) (string, error) { + if m.mockClaimPolicyForObject != nil { + return m.mockClaimPolicyForObject(object, policy) + } + return "", nil +} + +func (m *mockResourceDetector) ClaimClusterPolicyForObject(object *unstructured.Unstructured, policy *policyv1alpha1.ClusterPropagationPolicy) (string, error) { + if m.mockClaimClusterPolicyForObject != nil { + return m.mockClaimClusterPolicyForObject(object, policy) + } + return "", nil +} + +func (m *mockResourceDetector) BuildResourceBinding(object *unstructured.Unstructured, labels, annotations map[string]string, spec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ResourceBinding, error) { + if m.mockBuildResourceBinding != nil { + return m.mockBuildResourceBinding(object, labels, annotations, spec) + } + return &workv1alpha2.ResourceBinding{}, nil +} + +func (m *mockResourceDetector) BuildClusterResourceBinding(object *unstructured.Unstructured, labels, annotations map[string]string, spec *policyv1alpha1.PropagationSpec) (*workv1alpha2.ClusterResourceBinding, error) { + if m.mockBuildClusterResourceBinding != nil { + return m.mockBuildClusterResourceBinding(object, labels, annotations, spec) + } + return &workv1alpha2.ClusterResourceBinding{}, nil +} + +// mockPropagationPolicyLister is a mock implementation of the PropagationPolicyLister +type mockPropagationPolicyLister struct { + policies []*policyv1alpha1.PropagationPolicy +} + +func (m *mockPropagationPolicyLister) List(_ labels.Selector) (ret []runtime.Object, err error) { + var result []runtime.Object + for _, p := range m.policies { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + result = append(result, &unstructured.Unstructured{Object: u}) + } + return result, nil +} + +func (m *mockPropagationPolicyLister) Get(name string) (runtime.Object, error) { + for _, p := range m.policies { + if p.Name == name { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil + } + } + return nil, nil +} + +func (m *mockPropagationPolicyLister) ByNamespace(namespace string) cache.GenericNamespaceLister { + return &mockGenericNamespaceLister{ + policies: m.policies, + namespace: namespace, + } +} + +// mockGenericNamespaceLister is a mock implementation of cache.GenericNamespaceLister +type mockGenericNamespaceLister struct { + policies 
[]*policyv1alpha1.PropagationPolicy + namespace string +} + +func (m *mockGenericNamespaceLister) List(_ labels.Selector) (ret []runtime.Object, err error) { + var result []runtime.Object + for _, p := range m.policies { + if p.Namespace == m.namespace { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + result = append(result, &unstructured.Unstructured{Object: u}) + } + } + return result, nil +} + +func (m *mockGenericNamespaceLister) Get(name string) (runtime.Object, error) { + for _, p := range m.policies { + if p.Name == name && p.Namespace == m.namespace { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil + } + } + return nil, nil +} + +// mockClusterPropagationPolicyLister is a mock implementation of the ClusterPropagationPolicyLister +type mockClusterPropagationPolicyLister struct { + policies []*policyv1alpha1.ClusterPropagationPolicy +} + +func (m *mockClusterPropagationPolicyLister) List(_ labels.Selector) (ret []runtime.Object, err error) { + var result []runtime.Object + for _, p := range m.policies { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + result = append(result, &unstructured.Unstructured{Object: u}) + } + return result, nil +} + +func (m *mockClusterPropagationPolicyLister) Get(name string) (runtime.Object, error) { + for _, p := range m.policies { + if p.Name == name { + u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(p) + if err != nil { + return nil, err + } + return &unstructured.Unstructured{Object: u}, nil + } + } + return nil, nil +} + +func (m *mockClusterPropagationPolicyLister) ByNamespace(_ string) cache.GenericNamespaceLister { + return nil // ClusterPropagationPolicies are not namespaced +} + +// mockResourceInterpreter is a mock implementation of the ResourceInterpreter interface +type mockResourceInterpreter struct{} + +func (m *mockResourceInterpreter) Start(_ context.Context) error { + return nil +} + +func (m *mockResourceInterpreter) HookEnabled(_ schema.GroupVersionKind, _ configv1alpha1.InterpreterOperation) bool { + return false +} + +func (m *mockResourceInterpreter) GetReplicas(_ *unstructured.Unstructured) (int32, *workv1alpha2.ReplicaRequirements, error) { + return 0, nil, nil +} + +func (m *mockResourceInterpreter) ReviseReplica(object *unstructured.Unstructured, _ int64) (*unstructured.Unstructured, error) { + return object, nil +} + +func (m *mockResourceInterpreter) Retain(desired *unstructured.Unstructured, _ *unstructured.Unstructured) (*unstructured.Unstructured, error) { + return desired, nil +} + +func (m *mockResourceInterpreter) AggregateStatus(object *unstructured.Unstructured, _ []workv1alpha2.AggregatedStatusItem) (*unstructured.Unstructured, error) { + return object, nil +} + +func (m *mockResourceInterpreter) GetDependencies(_ *unstructured.Unstructured) ([]configv1alpha1.DependentObjectReference, error) { + return nil, nil +} + +func (m *mockResourceInterpreter) ReflectStatus(_ *unstructured.Unstructured) (*runtime.RawExtension, error) { + return nil, nil +} + +func (m *mockResourceInterpreter) InterpretHealth(_ *unstructured.Unstructured) (bool, error) { + return true, nil +} diff --git a/pkg/detector/policy.go b/pkg/detector/policy.go index 47b1077e62ec..1a1b7c69c3fe 100644 --- a/pkg/detector/policy.go +++ b/pkg/detector/policy.go @@ -22,6 +22,7 @@ import ( "time" apierrors 
"k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/errors" @@ -187,7 +188,7 @@ func (d *ResourceDetector) cleanPPUnmatchedRBs(policyID, policyNamespace, policy return err } - return d.removeRBsMarks(bindings, selectors, propagationPolicyMarkedLabels, propagationPolicyMarkedAnnotations) + return d.removeRBsClaimMetadata(bindings, selectors, propagationPolicyClaimLabels, propagationPolicyClaimAnnotations) } func (d *ResourceDetector) cleanCPPUnmatchedRBs(policyID, policyName string, selectors []policyv1alpha1.ResourceSelector) error { @@ -196,7 +197,7 @@ func (d *ResourceDetector) cleanCPPUnmatchedRBs(policyID, policyName string, sel return err } - return d.removeRBsMarks(bindings, selectors, clusterPropagationPolicyMarkedLabels, clusterPropagationPolicyMarkedAnnotations) + return d.removeRBsClaimMetadata(bindings, selectors, clusterPropagationPolicyClaimLabels, clusterPropagationPolicyClaimAnnotations) } func (d *ResourceDetector) cleanUnmatchedCRBs(policyID, policyName string, selectors []policyv1alpha1.ResourceSelector) error { @@ -205,13 +206,13 @@ func (d *ResourceDetector) cleanUnmatchedCRBs(policyID, policyName string, selec return err } - return d.removeCRBsMarks(bindings, selectors, clusterPropagationPolicyMarkedLabels, clusterPropagationPolicyMarkedAnnotations) + return d.removeCRBsClaimMetadata(bindings, selectors, clusterPropagationPolicyClaimLabels, clusterPropagationPolicyClaimAnnotations) } -func (d *ResourceDetector) removeRBsMarks(bindings *workv1alpha2.ResourceBindingList, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) error { +func (d *ResourceDetector) removeRBsClaimMetadata(bindings *workv1alpha2.ResourceBindingList, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) error { var errs []error for _, binding := range bindings.Items { - removed, err := d.removeResourceMarksIfNotMatched(binding.Spec.Resource, selectors, labels, annotations) + removed, err := d.removeResourceClaimMetadataIfNotMatched(binding.Spec.Resource, selectors, labels, annotations) if err != nil { klog.Errorf("Failed to remove resource labels and annotations when resource not match with policy selectors, err: %v", err) errs = append(errs, err) @@ -234,11 +235,11 @@ func (d *ResourceDetector) removeRBsMarks(bindings *workv1alpha2.ResourceBinding return errors.NewAggregate(errs) } -func (d *ResourceDetector) removeCRBsMarks(bindings *workv1alpha2.ClusterResourceBindingList, +func (d *ResourceDetector) removeCRBsClaimMetadata(bindings *workv1alpha2.ClusterResourceBindingList, selectors []policyv1alpha1.ResourceSelector, removeLabels, removeAnnotations []string) error { var errs []error for _, binding := range bindings.Items { - removed, err := d.removeResourceMarksIfNotMatched(binding.Spec.Resource, selectors, removeLabels, removeAnnotations) + removed, err := d.removeResourceClaimMetadataIfNotMatched(binding.Spec.Resource, selectors, removeLabels, removeAnnotations) if err != nil { klog.Errorf("Failed to remove resource labels and annotations when resource not match with policy selectors, err: %v", err) errs = append(errs, err) @@ -261,7 +262,7 @@ func (d *ResourceDetector) removeCRBsMarks(bindings *workv1alpha2.ClusterResourc return errors.NewAggregate(errs) } -func (d *ResourceDetector) removeResourceMarksIfNotMatched(objectReference workv1alpha2.ObjectReference, +func (d *ResourceDetector) 
removeResourceClaimMetadataIfNotMatched(objectReference workv1alpha2.ObjectReference, selectors []policyv1alpha1.ResourceSelector, labels, annotations []string) (bool, error) { objectKey, err := helper.ConstructClusterWideKey(objectReference) if err != nil { @@ -340,10 +341,10 @@ func (d *ResourceDetector) listCPPDerivedCRBs(policyID, policyName string) (*wor // excludeClusterPolicy excludes cluster propagation policy. // If propagation policy was claimed, cluster propagation policy should not exist. -func excludeClusterPolicy(objLabels map[string]string) bool { - if _, ok := objLabels[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]; !ok { +func excludeClusterPolicy(obj metav1.Object) (hasClaimedClusterPolicy bool) { + if _, ok := obj.GetLabels()[policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel]; !ok { return false } - delete(objLabels, policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel) + CleanupCPPClaimMetadata(obj) return true } diff --git a/pkg/detector/policy_test.go b/pkg/detector/policy_test.go index 154842ae697e..90cc8bc9b9cb 100644 --- a/pkg/detector/policy_test.go +++ b/pkg/detector/policy_test.go @@ -37,7 +37,867 @@ import ( "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager" ) -func Test_removeResourceMarksIfNotMatched(t *testing.T) { +func Test_cleanPPUnmatchedRBs(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(workv1alpha2.Install(scheme)) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Group: "apps", Version: "v1"}}) + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") + restMapper.Add(deploymentGVK, meta.RESTScopeNamespace) + tests := []struct { + name string + policyID string + policyName string + policyNamespace string + selectors []policyv1alpha1.ResourceSelector + wantErr bool + setupClient func() *fake.ClientBuilder + existingObject *unstructured.Unstructured + expectedBindings *workv1alpha2.ResourceBindingList + }{ + { + name: "clean unmatched binding resource with policy and namespace", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-1", + policyNamespace: "fake-namespace-1", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + } + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + ResourceVersion: "999", + Namespace: "fake-namespace-1", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + }, + Annotations: map[string]string{ + policyv1alpha1.PropagationPolicyNamespaceAnnotation: "deploy-match-namespace-1", + policyv1alpha1.PropagationPolicyNameAnnotation: "deploy-match-name-1", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj, rb).WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": 
"Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: &workv1alpha2.ResourceBindingList{Items: []workv1alpha2.ResourceBinding{}}, + }, + { + name: "cannot list unmatched binding resource with policy and namespace", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-2", + policyNamespace: "fake-namespace-2", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: true, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.existingObject) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + RESTMapper: fakeClient.RESTMapper(), + InformerManager: genMgr, + } + err := resourceDetector.cleanPPUnmatchedRBs(tt.policyID, tt.policyNamespace, tt.policyName, tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("cleanPPUnmatchedRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + bindings, err := resourceDetector.listPPDerivedRBs(tt.policyID, tt.policyNamespace, tt.policyName) + if (err != nil) != tt.wantErr { + t.Errorf("listPPDerivedRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + if !reflect.DeepEqual(tt.expectedBindings, bindings) { + t.Errorf("listPPDerivedRBs() = %v, want %v", bindings, tt.expectedBindings) + } + }) + } +} + +func Test_cleanUnmatchedRBs(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(workv1alpha2.Install(scheme)) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Group: "apps", Version: "v1"}}) + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") + restMapper.Add(deploymentGVK, meta.RESTScopeNamespace) + tests := []struct { + name string + policyID string + policyName string + selectors []policyv1alpha1.ResourceSelector + wantErr bool + setupClient func() *fake.ClientBuilder + existingObject *unstructured.Unstructured + expectedBindings *workv1alpha2.ResourceBindingList + }{ + { + name: "clean unmatched binding resource", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-1", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + } + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + ResourceVersion: "999", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + }, 
+ Annotations: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyAnnotation: "deploy-match-name-1", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj, rb).WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: &workv1alpha2.ResourceBindingList{Items: []workv1alpha2.ResourceBinding{}}, + }, + { + name: "cannot list unmatched binding resource", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-1", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: true, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.existingObject) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + RESTMapper: fakeClient.RESTMapper(), + InformerManager: genMgr, + } + err := resourceDetector.cleanCPPUnmatchedRBs(tt.policyID, tt.policyName, tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("cleanCPPUnmatchedRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + bindings, err := resourceDetector.listCPPDerivedRBs(tt.policyID, tt.policyName) + if (err != nil) != tt.wantErr { + t.Errorf("listCPPDerivedRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + if !reflect.DeepEqual(tt.expectedBindings, bindings) { + t.Errorf("listCPPDerivedRBs() = %v, want %v", bindings, tt.expectedBindings) + } + }) + } +} + +func Test_cleanUnmatchedCRBs(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(workv1alpha2.Install(scheme)) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Group: "apps", Version: "v1"}}) + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") + restMapper.Add(deploymentGVK, meta.RESTScopeNamespace) + tests := []struct { + name string + policyID string + policyName string + selectors []policyv1alpha1.ResourceSelector + wantErr bool + setupClient func() *fake.ClientBuilder + existingObject *unstructured.Unstructured + expectedBindings *workv1alpha2.ClusterResourceBindingList + }{ + { + name: "clean unmatched cluster binding resource", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-1", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := 
&unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + } + rb := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + ResourceVersion: "999", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + }, + Annotations: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyAnnotation: "deploy-match-name-1", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj, rb).WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: &workv1alpha2.ClusterResourceBindingList{Items: []workv1alpha2.ClusterResourceBinding{}}, + }, + { + name: "cannot list unmatched cluster binding resource", + policyID: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + policyName: "test-policy-1", + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + wantErr: true, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithRESTMapper(restMapper) + }, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + }, + }, + }, + expectedBindings: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.existingObject) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + RESTMapper: fakeClient.RESTMapper(), + InformerManager: genMgr, + } + err := resourceDetector.cleanUnmatchedCRBs(tt.policyID, tt.policyName, tt.selectors) + if (err != nil) != tt.wantErr { + t.Errorf("cleanUnmatchedCRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + bindings, err := resourceDetector.listCPPDerivedCRBs(tt.policyID, tt.policyName) + if (err != nil) != tt.wantErr { + t.Errorf("listCPPDerivedCRBs() error = %v, wantErr %v", err, tt.wantErr) + } + + if !reflect.DeepEqual(tt.expectedBindings, bindings) { + t.Errorf("listCPPDerivedCRBs() = %v, want %v", bindings, tt.expectedBindings) + } + }) + } +} + +func Test_removeRBsClaimMetadata(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(workv1alpha2.Install(scheme)) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Group: "apps", Version: "v1"}}) + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") + restMapper.Add(deploymentGVK, meta.RESTScopeNamespace) + tests := []struct { + name string + bindings *workv1alpha2.ResourceBindingList + selectors []policyv1alpha1.ResourceSelector + existingObject 
*unstructured.Unstructured + removeLabels []string + removeAnnotations []string + wantErr bool + setupClient func() *fake.ClientBuilder + }{ + { + name: "cannot remove resource binding with matching selectors", + bindings: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + } + return fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(obj).WithRESTMapper(restMapper) + }, + }, + { + name: "remove resource binding with non-matching selectors", + bindings: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + } + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + } + return fake.NewClientBuilder(). + WithScheme(scheme). 
+ WithObjects(obj, rb).WithRESTMapper(restMapper) + }, + }, + { + name: "failed to remove resource binding with non-matching selectors", + bindings: &workv1alpha2.ResourceBindingList{ + Items: []workv1alpha2.ResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: true, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithRESTMapper(restMapper) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.existingObject) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + RESTMapper: fakeClient.RESTMapper(), + InformerManager: genMgr, + } + err := resourceDetector.removeRBsClaimMetadata(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) + if (err != nil) != tt.wantErr { + t.Errorf("removeRBsClaimMetadata() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_removeCRBsClaimMetadata(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(workv1alpha2.Install(scheme)) + + restMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Group: "apps", Version: "v1"}}) + deploymentGVK := appsv1.SchemeGroupVersion.WithKind("Deployment") + restMapper.Add(deploymentGVK, meta.RESTScopeNamespace) + tests := []struct { + name string + bindings *workv1alpha2.ClusterResourceBindingList + selectors []policyv1alpha1.ResourceSelector + existingObject *unstructured.Unstructured + removeLabels []string + removeAnnotations []string + wantErr bool + setupClient func() *fake.ClientBuilder + }{ + { + name: "cannot remove cluster resource binding with matching selectors", + bindings: &workv1alpha2.ClusterResourceBindingList{ + Items: []workv1alpha2.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + 
"metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + } + return fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(obj).WithRESTMapper(restMapper) + }, + }, + { + name: "remove cluster resource binding with non-matching selectors", + bindings: &workv1alpha2.ClusterResourceBindingList{ + Items: []workv1alpha2.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: false, + setupClient: func() *fake.ClientBuilder { + obj := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + } + crb := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + } + return fake.NewClientBuilder(). + WithScheme(scheme). 
+ WithObjects(obj, crb).WithRESTMapper(restMapper) + }, + }, + { + name: "failed to remove cluster resource binding with non-matching selectors", + bindings: &workv1alpha2.ClusterResourceBindingList{ + Items: []workv1alpha2.ClusterResourceBinding{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "binding-1", + Namespace: "fake-namespace-1", + ResourceVersion: "999", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "deployment", + }, + }, + }, + }, + }, + selectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Pod", + Namespace: "default", + }, + }, + removeLabels: []string{"app"}, + removeAnnotations: []string{"foo"}, + existingObject: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": map[string]interface{}{ + "name": "deployment", + "namespace": "test", + "labels": map[string]interface{}{"app": "nginx"}, + "annotations": map[string]interface{}{"foo": "bar"}, + }, + }, + }, + wantErr: true, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithRESTMapper(restMapper) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.existingObject) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + RESTMapper: fakeClient.RESTMapper(), + InformerManager: genMgr, + } + err := resourceDetector.removeCRBsClaimMetadata(tt.bindings, tt.selectors, tt.removeLabels, tt.removeAnnotations) + if (err != nil) != tt.wantErr { + t.Errorf("removeCRBsClaimMetadata() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func Test_removeResourceClaimMetadataIfNotMatched(t *testing.T) { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1.AddToScheme(scheme)) @@ -253,13 +1113,13 @@ func Test_removeResourceMarksIfNotMatched(t *testing.T) { InformerManager: genMgr, } - updated, err := resourceDetector.removeResourceMarksIfNotMatched(tt.objectReference, tt.selectors, tt.labels, tt.annotations) + updated, err := resourceDetector.removeResourceClaimMetadataIfNotMatched(tt.objectReference, tt.selectors, tt.labels, tt.annotations) if (err != nil) != tt.wantErr { - t.Errorf("removeResourceMarksIfNotMatched() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("removeResourceClaimMetadataIfNotMatched() error = %v, wantErr %v", err, tt.wantErr) } if updated != tt.wantUpdated { - t.Errorf("removeResourceMarksIfNotMatched() = %v, want %v", updated, tt.wantUpdated) + t.Errorf("removeResourceClaimMetadataIfNotMatched() = %v, want %v", updated, tt.wantUpdated) } }) } @@ -503,26 +1363,54 @@ func Test_listCPPDerivedCRBs(t *testing.T) { func Test_excludeClusterPolicy(t *testing.T) { tests := []struct { - name string - objLabels map[string]string - want bool + name string + obj metav1.Object + result metav1.Object + hasClaimedClusterPolicy bool }{ { - name: "propagation policy was claimed", - objLabels: map[string]string{}, - want: false, + name: "propagation policy was claimed", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": 
map[string]interface{}{}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + hasClaimedClusterPolicy: false, }, { name: "propagation policy was not claimed", - objLabels: map[string]string{ - policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", + obj: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "f2507cgb-f3f3-4a4b-b289-5691a4fef979", "foo": "bar"}, + "annotations": map[string]interface{}{policyv1alpha1.ClusterPropagationPolicyAnnotation: "nginx", "foo1": "bar1"}, + }, + }, + }, + result: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{"foo": "bar"}, + "annotations": map[string]interface{}{"foo1": "bar1"}, + }, + }, }, - want: true, + hasClaimedClusterPolicy: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := excludeClusterPolicy(tt.objLabels) - assert.Equal(t, tt.want, got) + got := excludeClusterPolicy(tt.obj) + assert.Equal(t, tt.obj, tt.result) + assert.Equal(t, tt.hasClaimedClusterPolicy, got) }) } } diff --git a/pkg/detector/preemption_test.go b/pkg/detector/preemption_test.go new file mode 100644 index 000000000000..bcda63c957c8 --- /dev/null +++ b/pkg/detector/preemption_test.go @@ -0,0 +1,651 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package detector + +import ( + "fmt" + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + dynamicfake "k8s.io/client-go/dynamic/fake" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager" +) + +type MockAsyncWorker struct { + queue []interface{} +} + +// Note: This is a dummy implementation of Add for testing purposes. +func (m *MockAsyncWorker) Add(item interface{}) { + // No actual work is done in the mock; we just simulate running + m.queue = append(m.queue, item) +} + +// Note: This is a dummy implementation of AddAfter for testing purposes. +func (m *MockAsyncWorker) AddAfter(item interface{}, duration time.Duration) { + // No actual work is done in the mock; we just simulate running + fmt.Printf("%v", duration) + m.queue = append(m.queue, item) +} + +// Note: This is a dummy implementation of Enqueue for testing purposes. 
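+// It records the object itself (rather than a derived key), so tests can assert on the queue via GetQueue.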
+func (m *MockAsyncWorker) Enqueue(obj interface{}) { + // Assuming KeyFunc is used to generate a key; for simplicity, we use obj directly + m.queue = append(m.queue, obj) +} + +// Note: This is a dummy implementation of Run for testing purposes. +func (m *MockAsyncWorker) Run(workerNumber int, stopChan <-chan struct{}) { + // No actual work is done in the mock; we just simulate running + fmt.Printf("%v", workerNumber) + fmt.Printf("%v", <-stopChan) +} + +// GetQueue returns the current state of the queue +func (m *MockAsyncWorker) GetQueue() []interface{} { + return m.queue +} + +func TestHandleDeprioritizedPropagationPolicy(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(v1alpha2.Install(scheme)) + utilruntime.Must(policyv1alpha1.Install(scheme)) + + propagationPolicyGVR := schema.GroupVersionResource{ + Group: policyv1alpha1.GroupVersion.Group, + Version: policyv1alpha1.GroupVersion.Version, + Resource: policyv1alpha1.ResourcePluralPropagationPolicy, + } + + tests := []struct { + name string + newPolicy *policyv1alpha1.PropagationPolicy + oldPolicy *policyv1alpha1.PropagationPolicy + objects []runtime.Object + setupClient func() *fake.ClientBuilder + wantQueueSize int + }{ + { + name: "preempt deprioritized propagation policy of len 1", + newPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: []runtime.Object{ + &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + }, + setupClient: func() *fake.ClientBuilder { + obj := &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj) + }, + wantQueueSize: 1, + }, + { + name: "preempt deprioritized propagation policy of len 2", + newPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: 
"test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](5), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: []runtime.Object{ + &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-2", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-2", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test-2", + Name: "default-2", + }, + }, + }, + }, + }, + setupClient: func() *fake.ClientBuilder { + obj := []client.Object{ + &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-2", + Namespace: "test", + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "policy-2", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test-2", + Name: "default-2", + }, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj...) 
+ }, + wantQueueSize: 2, + }, + { + name: "no policy to preempt", + newPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: nil, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithScheme(scheme) + }, + wantQueueSize: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.objects...) + genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + InformerManager: genMgr, + propagationPolicyLister: genMgr.Lister(propagationPolicyGVR), + } + mockWorker := &MockAsyncWorker{} + resourceDetector.policyReconcileWorker = mockWorker + resourceDetector.InformerManager.Start() + resourceDetector.InformerManager.WaitForCacheSync() + + resourceDetector.HandleDeprioritizedPropagationPolicy(*tt.oldPolicy, *tt.newPolicy) + + gotQueueSize := len(mockWorker.GetQueue()) + if gotQueueSize != tt.wantQueueSize { + t.Errorf("HandleDeprioritizedPropagationPolicy() want queue size %v, got %v", tt.wantQueueSize, gotQueueSize) + } + }) + } +} + +func TestHandleDeprioritizedClusterPropagationPolicy(t *testing.T) { + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + utilruntime.Must(appsv1.AddToScheme(scheme)) + utilruntime.Must(v1alpha2.Install(scheme)) + utilruntime.Must(policyv1alpha1.Install(scheme)) + + clusterPropagationPolicyGVR := schema.GroupVersionResource{ + Group: policyv1alpha1.GroupVersion.Group, + Version: policyv1alpha1.GroupVersion.Version, + Resource: policyv1alpha1.ResourcePluralClusterPropagationPolicy, + } + + tests := []struct { + name string + newPolicy *policyv1alpha1.ClusterPropagationPolicy + oldPolicy *policyv1alpha1.ClusterPropagationPolicy + objects []runtime.Object + setupClient func() *fake.ClientBuilder + wantQueueSize int + }{ + { + name: "preempt deprioritized cluster propagation policy of len 1", + newPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + ResourceSelectors: 
[]policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: []runtime.Object{ + &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + }, + setupClient: func() *fake.ClientBuilder { + obj := &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj) + }, + wantQueueSize: 1, + }, + { + name: "preempt deprioritized cluster propagation policy of len 2", + newPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](5), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: []runtime.Object{ + &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-2", + Namespace: "bar-2", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-2", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test-2", + Name: "default-2", + }, + }, + }, + }, + }, + setupClient: func() *fake.ClientBuilder { + obj := []client.Object{ + &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: 
"bar", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-1", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](3), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + Name: "default", + }, + }, + }, + }, + &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo-2", + Namespace: "bar-2", + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "policy-2", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + Preemption: policyv1alpha1.PreemptAlways, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test-2", + Name: "default-2", + }, + }, + }, + }, + } + return fake.NewClientBuilder().WithScheme(scheme).WithObjects(obj...) + }, + wantQueueSize: 2, + }, + { + name: "no cluster policy to preempt", + newPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](2), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + oldPolicy: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "test"}, + Spec: policyv1alpha1.PropagationSpec{ + Priority: ptr.To[int32](4), + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test", + LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "nginx"}}, + }, + }, + }, + }, + objects: nil, + setupClient: func() *fake.ClientBuilder { + return fake.NewClientBuilder().WithScheme(scheme) + }, + wantQueueSize: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := tt.setupClient().Build() + stopCh := make(chan struct{}) + defer close(stopCh) + fakeDynamicClient := dynamicfake.NewSimpleDynamicClient(scheme, tt.objects...) 
+ genMgr := genericmanager.NewSingleClusterInformerManager(fakeDynamicClient, 0, stopCh) + resourceDetector := &ResourceDetector{ + Client: fakeClient, + DynamicClient: fakeDynamicClient, + InformerManager: genMgr, + clusterPropagationPolicyLister: genMgr.Lister(clusterPropagationPolicyGVR), + } + mockWorker := &MockAsyncWorker{} + resourceDetector.clusterPolicyReconcileWorker = mockWorker + resourceDetector.InformerManager.Start() + resourceDetector.InformerManager.WaitForCacheSync() + + resourceDetector.HandleDeprioritizedClusterPropagationPolicy(*tt.oldPolicy, *tt.newPolicy) + + gotQueueSize := len(mockWorker.GetQueue()) + if gotQueueSize != tt.wantQueueSize { + t.Errorf("HandleDeprioritizedClusterPropagationPolicy() want queue size %v, got %v", tt.wantQueueSize, gotQueueSize) + } + }) + } +} diff --git a/pkg/estimator/client/cache.go b/pkg/estimator/client/cache.go index cc7ad0f6a9c9..834915b6ca98 100644 --- a/pkg/estimator/client/cache.go +++ b/pkg/estimator/client/cache.go @@ -18,6 +18,7 @@ package client import ( "fmt" + "strings" "sync" "time" @@ -26,7 +27,6 @@ import ( "k8s.io/klog/v2" estimatorservice "github.com/karmada-io/karmada/pkg/estimator/service" - "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/grpcconnection" "github.com/karmada-io/karmada/pkg/util/names" ) @@ -37,6 +37,13 @@ type SchedulerEstimatorCache struct { estimator map[string]*clientWrapper } +// SchedulerEstimatorServiceInfo contains information needed to discover and connect to a scheduler estimator service. +type SchedulerEstimatorServiceInfo struct { + Name string + NamePrefix string + Namespace string +} + // NewSchedulerEstimatorCache returns an accurate scheduler estimator cache. func NewSchedulerEstimatorCache() *SchedulerEstimatorCache { return &SchedulerEstimatorCache{ @@ -97,25 +104,25 @@ func (c *SchedulerEstimatorCache) GetClient(name string) (estimatorservice.Estim } // EstablishConnection establishes a new gRPC connection with the specified cluster scheduler estimator. 
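+// For illustration (hypothetical values): with SchedulerEstimatorServiceInfo{Name: "member1",
+// NamePrefix: "karmada-scheduler-estimator", Namespace: "karmada-system"}, the function dials the
+// estimator Service derived from that prefix and cluster name in the given namespace.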
-func EstablishConnection(kubeClient kubernetes.Interface, name string, estimatorCache *SchedulerEstimatorCache, estimatorServicePrefix string, grpcConfig *grpcconnection.ClientConfig) error {
-	if estimatorCache.IsEstimatorExist(name) {
+func EstablishConnection(kubeClient kubernetes.Interface, serviceInfo SchedulerEstimatorServiceInfo, estimatorCache *SchedulerEstimatorCache, grpcConfig *grpcconnection.ClientConfig) error {
+	if estimatorCache.IsEstimatorExist(serviceInfo.Name) {
 		return nil
 	}
 
-	serverAddr, err := resolveCluster(kubeClient, util.NamespaceKarmadaSystem,
-		names.GenerateEstimatorServiceName(estimatorServicePrefix, name), int32(grpcConfig.TargetPort))
+	serverAddrs, err := resolveCluster(kubeClient, serviceInfo.Namespace,
+		names.GenerateEstimatorServiceName(serviceInfo.NamePrefix, serviceInfo.Name), int32(grpcConfig.TargetPort))
 	if err != nil {
 		return err
 	}
 
-	klog.Infof("Start dialing estimator server(%s) of cluster(%s).", serverAddr, name)
-	cc, err := grpcConfig.DialWithTimeOut(serverAddr, 5*time.Second)
+	klog.Infof("Start dialing estimator server(%s) of cluster(%s).", strings.Join(serverAddrs, ","), serviceInfo.Name)
+	cc, err := grpcConfig.DialWithTimeOut(serverAddrs, 5*time.Second)
 	if err != nil {
-		klog.Errorf("Failed to dial cluster(%s): %v.", name, err)
+		klog.Errorf("Failed to dial cluster(%s): %v.", serviceInfo.Name, err)
 		return err
 	}
 	c := estimatorservice.NewEstimatorClient(cc)
-	estimatorCache.AddCluster(name, cc, c)
-	klog.Infof("Connection with estimator server(%s) of cluster(%s) has been established.", serverAddr, name)
+	estimatorCache.AddCluster(serviceInfo.Name, cc, c)
+	klog.Infof("Connection with estimator server(%s) of cluster(%s) has been established.", cc.Target(), serviceInfo.Name)
 	return nil
 }
diff --git a/pkg/estimator/client/general.go b/pkg/estimator/client/general.go
index 6023c89f528a..cfaebd6cb0ae 100644
--- a/pkg/estimator/client/general.go
+++ b/pkg/estimator/client/general.go
@@ -54,7 +54,8 @@ func (ge *GeneralEstimator) MaxAvailableReplicas(_ context.Context, clusters []*
 }
 
 func (ge *GeneralEstimator) maxAvailableReplicas(cluster *clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) int32 {
-	resourceSummary := cluster.Status.ResourceSummary
+	// Note: resourceSummary must be deep-copied before use to avoid mutating the cluster's original status.
+	resourceSummary := cluster.Status.ResourceSummary.DeepCopy()
 	if resourceSummary == nil {
 		return 0
 	}
@@ -209,28 +210,15 @@ func getMaximumReplicasBasedOnResourceModels(cluster *clusterv1alpha1.Cluster, r
 			return -1, fmt.Errorf("resource model is inapplicable as missing resource: %s", string(key))
 		}
 
-		for index, minValue := range quantityArray {
-			// Suppose there is the following resource model:
-			// Model1: cpu [1C,2C)
-			// Model2: cpu [2C,3C)
-			// if pod cpu request is 1.5C, we regard the nodes in model1 as meeting the requirements of the Pod.
-			// Suppose there is the following resource model:
-			// Model1: cpu [1C,2C), memory [1Gi,2Gi)
-			// Model2: cpu [2C,3C), memory [2Gi,3Gi)
-			// if pod cpu request is 1.5C and memory request is 2.5Gi
-			// We regard the node of model1 as not meeting the requirements, and the nodes of model2 and later as meeting the requirements.
-			if minValue.Cmp(value) > 0 {
-				// Since the 'min' value of the first model is always 0, hit here
-				// the index should be >=1, so it's safe to use 'index-1' here.
-				if index-1 > minCompliantModelIndex {
-					minCompliantModelIndex = index - 1
-				}
-				break
-			}
-
-			if index == len(quantityArray)-1 {
-				minCompliantModelIndex = index
-			}
+		// Find the minimum compliant model grade for this resource request. If no
+		// suitable grade is found, no model in the cluster can satisfy the request,
+		// so return 0 replicas immediately.
+		minCompliantModelIndexForResource := minimumModelIndex(quantityArray, value)
+		if minCompliantModelIndexForResource == -1 {
+			return 0, nil
+		}
+		// Track the most demanding grade required across all requested resources.
+		if minCompliantModelIndex <= minCompliantModelIndexForResource {
+			minCompliantModelIndex = minCompliantModelIndexForResource
+		}
 	}
@@ -244,3 +232,18 @@ func getMaximumReplicasBasedOnResourceModels(cluster *clusterv1alpha1.Cluster, r
 
 	return maximumReplicasForResource, nil
 }
+
+// minimumModelIndex returns the index of the first grade whose minimum is greater
+// than or equal to requestValue, or -1 if no grade qualifies. For example, given
+// grade minimums [0, 1C, 2C], a request of 1.5C returns index 2, while a request
+// of 5C returns -1.
+func minimumModelIndex(minimumGrades []resource.Quantity, requestValue resource.Quantity) int {
+	for index, minValue := range minimumGrades {
+		// Suppose there is the following resource model:
+		// Grade1: cpu [1C,2C)
+		// Grade2: cpu [2C,3C)
+		// If a Pod requests 1.5C of CPU, grade1 may not be able to provide sufficient
+		// resources, so we choose grade2.
+		if minValue.Cmp(requestValue) >= 0 {
+			return index
+		}
+	}
+
+	return -1
+}
diff --git a/pkg/estimator/client/general_test.go b/pkg/estimator/client/general_test.go
new file mode 100644
index 000000000000..e0bf851da2c2
--- /dev/null
+++ b/pkg/estimator/client/general_test.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package client + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +func TestGetMaximumReplicasBasedOnResourceModels(t *testing.T) { + tests := []struct { + name string + cluster clusterv1alpha1.Cluster + replicaRequirements workv1alpha2.ReplicaRequirements + expectError bool + expectedReplicas int64 + }{ + { + name: "No grade defined should result in an error", + cluster: clusterv1alpha1.Cluster{}, + replicaRequirements: workv1alpha2.ReplicaRequirements{ + ResourceRequest: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + }, + }, + expectError: true, + expectedReplicas: -1, + }, + { + name: "Partially compliant grades", + cluster: clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + ResourceModels: []clusterv1alpha1.ResourceModel{ + { + Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")}}, + }, + { + Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")}}, + }, + { + Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")}}, + }, + }, + }, + Status: clusterv1alpha1.ClusterStatus{ + ResourceSummary: &clusterv1alpha1.ResourceSummary{ + AllocatableModelings: []clusterv1alpha1.AllocatableModeling{ + {Grade: 0, Count: 1}, + {Grade: 1, Count: 1}, + {Grade: 2, Count: 1}, + }, + }, + }, + }, + replicaRequirements: workv1alpha2.ReplicaRequirements{ + ResourceRequest: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1.5"), + }, + }, + expectError: false, + expectedReplicas: 1, + }, + { + name: "No compliant grades", + cluster: clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + ResourceModels: []clusterv1alpha1.ResourceModel{ + { + Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")}, + }, + }, + { + Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")}, + }, + }, + { + Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")}, + }, + }, + }, + }, + Status: clusterv1alpha1.ClusterStatus{ + ResourceSummary: &clusterv1alpha1.ResourceSummary{ + AllocatableModelings: []clusterv1alpha1.AllocatableModeling{ + {Grade: 0, Count: 1}, + {Grade: 1, Count: 1}, + {Grade: 2, Count: 1}, + }, + }, + }, + }, + replicaRequirements: workv1alpha2.ReplicaRequirements{ + ResourceRequest: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("3"), + }, + }, + expectError: false, + expectedReplicas: 0, + }, + { + name: "Multi resource request", + cluster: clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + ResourceModels: []clusterv1alpha1.ResourceModel{ + { + Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("0"), Max: resource.MustParse("1Gi")}, + }, + }, + { + Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: 
resource.MustParse("1"), Max: resource.MustParse("2")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("1Gi"), Max: resource.MustParse("2Gi")}, + }, + }, + { + Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("2Gi"), Max: resource.MustParse("4Gi")}, + }, + }, + }, + }, + Status: clusterv1alpha1.ClusterStatus{ + ResourceSummary: &clusterv1alpha1.ResourceSummary{ + AllocatableModelings: []clusterv1alpha1.AllocatableModeling{ + {Grade: 0, Count: 1}, + {Grade: 1, Count: 1}, + {Grade: 2, Count: 1}, + }, + }, + }, + }, + replicaRequirements: workv1alpha2.ReplicaRequirements{ + ResourceRequest: corev1.ResourceList{ + // When looking CPU, grade 1 meets, then looking memory, grade 2 meets. + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("1.5Gi"), + }, + }, + expectError: false, + expectedReplicas: 1, + }, + { + name: "request exceeds highest grade", + cluster: clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + ResourceModels: []clusterv1alpha1.ResourceModel{ + { + Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("0"), Max: resource.MustParse("1Gi")}, + }, + }, + { + Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("1Gi"), Max: resource.MustParse("2Gi")}, + }, + }, + { + Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{ + {Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")}, + {Name: corev1.ResourceMemory, Min: resource.MustParse("2Gi"), Max: resource.MustParse("4Gi")}, + }, + }, + }, + }, + Status: clusterv1alpha1.ClusterStatus{ + ResourceSummary: &clusterv1alpha1.ResourceSummary{ + AllocatableModelings: []clusterv1alpha1.AllocatableModeling{ + {Grade: 0, Count: 1}, + {Grade: 1, Count: 1}, + {Grade: 2, Count: 1}, + }, + }, + }, + }, + replicaRequirements: workv1alpha2.ReplicaRequirements{ + ResourceRequest: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("2.5Gi"), // no grade can provide sufficient memories. + }, + }, + expectError: false, + expectedReplicas: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + replicas, err := getMaximumReplicasBasedOnResourceModels(&tt.cluster, &tt.replicaRequirements) + if tt.expectError && err == nil { + t.Errorf("Expects an error but got none") + } + if !tt.expectError && err != nil { + t.Errorf("getMaximumReplicasBasedOnResourceModels() returned an unexpected error: %v", err) + } + if replicas != tt.expectedReplicas { + t.Errorf("getMaximumReplicasBasedOnResourceModels() = %v, expectedReplicas %v", replicas, tt.expectedReplicas) + } + }) + } +} diff --git a/pkg/estimator/client/service.go b/pkg/estimator/client/service.go index c1216557763c..274c061c4a43 100644 --- a/pkg/estimator/client/service.go +++ b/pkg/estimator/client/service.go @@ -31,7 +31,7 @@ import ( // ResolveCluster parses Service resource content by itself. 
 // Fixes Issue https://github.com/karmada-io/karmada/issues/2487
 // Modified from "k8s.io/apiserver/pkg/util/proxy/proxy.go:92 => func ResolveCluster"
-func resolveCluster(kubeClient kubernetes.Interface, namespace, id string, port int32) (string, error) {
+func resolveCluster(kubeClient kubernetes.Interface, namespace, id string, port int32) ([]string, error) {
 	svc, err := kubeClient.CoreV1().Services(namespace).Get(context.TODO(), id, metav1.GetOptions{})
 	if err != nil {
 		if apierrors.IsNotFound(err) {
@@ -39,29 +39,33 @@ func resolveCluster(kubeClient kubernetes.Interface, namespace, id string, port
 			 * When Deploying Karmada in Host Kubernetes Cluster, the kubeClient will connect kube-apiserver
 			 * of Karmada Control Plane, rather than of host cluster.
 			 * But the Service resource is defined in Host Kubernetes Cluster. So we cannot get its content here.
-			 * The best thing we can do is just glue host:port together, and try to connect to it.
+			 * The best thing we can do is assemble the candidate host:port pairs by naming convention, and try to connect to them.
 			 */
-			return net.JoinHostPort(fmt.Sprintf("%s.%s.svc.cluster.local", id, namespace), fmt.Sprintf("%d", port)), nil
+			return []string{
+				net.JoinHostPort(fmt.Sprintf("%s.%s.svc.cluster.local", id, namespace), fmt.Sprintf("%d", port)),
+				// To support environments with a custom DNS suffix.
+				net.JoinHostPort(fmt.Sprintf("%s.%s.svc", id, namespace), fmt.Sprintf("%d", port)),
+			}, nil
 		}
-		return "", err
+		return nil, err
 	}
 
 	if svc.Spec.Type != corev1.ServiceTypeExternalName {
 		// We only support ExternalName type here.
 		// See discussions in PR: https://github.com/karmada-io/karmada/pull/2574#discussion_r979539389
-		return "", fmt.Errorf("unsupported service type %q", svc.Spec.Type)
+		return nil, fmt.Errorf("unsupported service type %q", svc.Spec.Type)
 	}
 
 	svcPort, err := findServicePort(svc, port)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	if svcPort.TargetPort.Type != intstr.Int {
-		return "", fmt.Errorf("ExternalName service type should have int target port, "+
+		return nil, fmt.Errorf("ExternalName service type should have int target port, "+
 			"current target port: %v", svcPort.TargetPort)
 	}
 
-	return net.JoinHostPort(svc.Spec.ExternalName, fmt.Sprintf("%d", svcPort.TargetPort.IntVal)), nil
+	return []string{net.JoinHostPort(svc.Spec.ExternalName, fmt.Sprintf("%d", svcPort.TargetPort.IntVal))}, nil
 }
 
 // findServicePort finds the service port by name or numerically.
diff --git a/pkg/estimator/client/service_test.go b/pkg/estimator/client/service_test.go
new file mode 100644
index 000000000000..8162e56830c0
--- /dev/null
+++ b/pkg/estimator/client/service_test.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package client + +import ( + "context" + "reflect" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/kubernetes/fake" +) + +func TestResolveCluster(t *testing.T) { + tests := []struct { + name string + namespace string + id string + port int32 + service *corev1.Service + expectError bool + expected []string + }{ + { + name: "Service not found", + namespace: "default", + id: "nonexistent", + port: 80, + service: nil, + expected: []string{"nonexistent.default.svc.cluster.local:80", "nonexistent.default.svc:80"}, + }, + { + name: "Unsupported service type", + namespace: "default", + id: "myservice", + port: 80, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeClusterIP, + }, + }, + expectError: true, + }, + { + name: "ExternalName service with int target port", + namespace: "default", + id: "myservice", + port: 80, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "example.com", + Ports: []corev1.ServicePort{ + { + Port: 80, + TargetPort: intstr.FromInt(8080), + }, + }, + }, + }, + expected: []string{"example.com:8080"}, + }, + { + name: "ExternalName service with non-int target port", + namespace: "default", + id: "myservice", + port: 80, + service: &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "myservice", + Namespace: "default", + }, + Spec: corev1.ServiceSpec{ + Type: corev1.ServiceTypeExternalName, + ExternalName: "example.com", + Ports: []corev1.ServicePort{ + { + Port: 80, + TargetPort: intstr.FromString("http"), + }, + }, + }, + }, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + clientset := fake.NewSimpleClientset() + if tt.service != nil { + _, err := clientset.CoreV1().Services(tt.namespace).Create(context.TODO(), tt.service, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to create service: %v", err) + } + } + + result, err := resolveCluster(clientset, tt.namespace, tt.id, tt.port) + if (err != nil) != tt.expectError { + t.Errorf("expected error: %v, got: %v", tt.expectError, err) + } + if !reflect.DeepEqual(tt.expected, result) { + t.Errorf("expected: %v, got: %v", tt.expected, result) + } + }) + } +} + +func TestFindServicePort(t *testing.T) { + tests := []struct { + name string + service *corev1.Service + port int32 + expectError bool + }{ + { + name: "Port found", + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 80}, + }, + }, + }, + port: 80, + }, + { + name: "Port not found", + service: &corev1.Service{ + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{ + {Port: 8080}, + }, + }, + }, + port: 80, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + _, err := findServicePort(tt.service, tt.port) + if (err != nil) != tt.expectError { + t.Errorf("expected error: %v, got: %v", tt.expectError, err) + } + }) + } +} diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 3b328475e3da..31f52beca3e9 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -109,9 +109,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) 
map[string]common.OpenA "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaList": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaList(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaSpec": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaSpec(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FederatedResourceQuotaStatus": schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaStatus(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider": schema_pkg_apis_policy_v1alpha1_FieldOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldSelector": schema_pkg_apis_policy_v1alpha1_FieldSelector(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider": schema_pkg_apis_policy_v1alpha1_ImageOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImagePredicate": schema_pkg_apis_policy_v1alpha1_ImagePredicate(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation": schema_pkg_apis_policy_v1alpha1_JSONPatchOperation(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider": schema_pkg_apis_policy_v1alpha1_LabelAnnotationOverrider(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.OverridePolicy": schema_pkg_apis_policy_v1alpha1_OverridePolicy(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.OverridePolicyList": schema_pkg_apis_policy_v1alpha1_OverridePolicyList(ref), @@ -130,6 +132,7 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.StaticClusterWeight": schema_pkg_apis_policy_v1alpha1_StaticClusterWeight(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.SuspendClusters": schema_pkg_apis_policy_v1alpha1_SuspendClusters(ref), "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension": schema_pkg_apis_policy_v1alpha1_Suspension(ref), + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation": schema_pkg_apis_policy_v1alpha1_YAMLPatchOperation(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterAffinity": schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.ClusterConditionRequirement": schema_pkg_apis_remedy_v1alpha1_ClusterConditionRequirement(ref), "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1.DecisionMatch": schema_pkg_apis_remedy_v1alpha1_DecisionMatch(ref), @@ -4192,6 +4195,58 @@ func schema_pkg_apis_policy_v1alpha1_FederatedResourceQuotaStatus(ref common.Ref } } +func schema_pkg_apis_policy_v1alpha1_FieldOverrider(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future. 
Note: In any given instance, FieldOverrider processes either JSON or YAML fields, but not both simultaneously.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "fieldPath": { + SchemaProps: spec.SchemaProps{ + Description: "FieldPath specifies the initial location in the instance document where the operation should take place. The path uses RFC 6901 for navigating into nested structures. For example, the path \"/data/db-config.yaml\" specifies the configuration data key named \"db-config.yaml\" in a ConfigMap: \"/data/db-config.yaml\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "json": { + SchemaProps: spec.SchemaProps{ + Description: "JSON represents the operations performed on the JSON document specified by the FieldPath.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation"), + }, + }, + }, + }, + }, + "yaml": { + SchemaProps: spec.SchemaProps{ + Description: "YAML represents the operations performed on the YAML document specified by the FieldPath.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation"), + }, + }, + }, + }, + }, + }, + Required: []string{"fieldPath"}, + }, + }, + Dependencies: []string{ + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.JSONPatchOperation", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.YAMLPatchOperation"}, + } +} + func schema_pkg_apis_policy_v1alpha1_FieldSelector(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4288,6 +4343,44 @@ func schema_pkg_apis_policy_v1alpha1_ImagePredicate(ref common.ReferenceCallback } } +func schema_pkg_apis_policy_v1alpha1_JSONPatchOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "JSONPatchOperation represents a single field modification operation for JSON format.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". 
For \"remove\" operation, this field is ignored.", + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + }, + }, + }, + Required: []string{"subPath", "operator"}, + }, + }, + Dependencies: []string{ + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"}, + } +} + func schema_pkg_apis_policy_v1alpha1_LabelAnnotationOverrider(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -4479,7 +4572,7 @@ func schema_pkg_apis_policy_v1alpha1_Overriders(ref common.ReferenceCallback) co return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Description: "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - Plaintext", + Description: "Overriders offers various alternatives to represent the override rules.\n\nIf more than one alternative exists, they will be applied with following order: - ImageOverrider - CommandOverrider - ArgsOverrider - LabelsOverrider - AnnotationsOverrider - FieldOverrider - Plaintext", Type: []string{"object"}, Properties: map[string]spec.Schema{ "plaintext": { @@ -4566,11 +4659,25 @@ func schema_pkg_apis_policy_v1alpha1_Overriders(ref common.ReferenceCallback) co }, }, }, + "fieldOverrider": { + SchemaProps: spec.SchemaProps{ + Description: "FieldOverrider represents the rules dedicated to modifying a specific field in any Kubernetes resource. This allows changing a single field within the resource with multiple operations. It is designed to handle structured field values such as those found in ConfigMaps or Secrets. The current implementation supports JSON and YAML formats, but can easily be extended to support XML in the future.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider"), + }, + }, + }, + }, + }, }, }, }, Dependencies: []string{ - "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.CommandArgsOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.PlaintextOverrider"}, + "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.CommandArgsOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.FieldOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.ImageOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.LabelAnnotationOverrider", "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.PlaintextOverrider"}, } } @@ -4878,6 +4985,13 @@ func schema_pkg_apis_policy_v1alpha1_PropagationSpec(ref common.ReferenceCallbac Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"), }, }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the resource template is deleted. If set to true, resources will be preserved on the member clusters. 
Default is false, which means resources will be deleted along with the resource template.\n\nThis setting is particularly useful during workload migration scenarios to ensure that rollback can occur quickly without affecting the workloads running on the member clusters.\n\nAdditionally, this setting applies uniformly across all member clusters and will not selectively control preservation on only some clusters.\n\nNote: This setting does not apply to the deletion of the policy itself. When the policy is deleted, the resource templates and their corresponding propagated resources in member clusters will remain unchanged unless explicitly deleted.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"resourceSelectors"}, }, @@ -5169,6 +5283,44 @@ func schema_pkg_apis_policy_v1alpha1_Suspension(ref common.ReferenceCallback) co } } +func schema_pkg_apis_policy_v1alpha1_YAMLPatchOperation(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "YAMLPatchOperation represents a single field modification operation for YAML format.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "subPath": { + SchemaProps: spec.SchemaProps{ + Description: "SubPath specifies the relative location within the initial FieldPath where the operation should take place. The path uses RFC 6901 for navigating into nested structures.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "operator": { + SchemaProps: spec.SchemaProps{ + Description: "Operator indicates the operation on target field. Available operators are: \"add\", \"remove\", and \"replace\".", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "value": { + SchemaProps: spec.SchemaProps{ + Description: "Value is the new value to set for the specified field if the operation is \"add\" or \"replace\". For \"remove\" operation, this field is ignored.", + Ref: ref("k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"), + }, + }, + }, + Required: []string{"subPath", "operator"}, + }, + }, + Dependencies: []string{ + "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1.JSON"}, + } +} + func schema_pkg_apis_remedy_v1alpha1_ClusterAffinity(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -6378,7 +6530,14 @@ func schema_pkg_apis_work_v1alpha1_WorkSpec(ref common.ReferenceCallback) common }, "suspendDispatching": { SchemaProps: spec.SchemaProps{ - Description: "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to all clusters.", + Description: "SuspendDispatching controls whether dispatching should be suspended, nil means not suspend. Note: true means stop propagating to the corresponding member cluster, and does not prevent status collection.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member cluster when the Work object is deleted. If set to true, resources will be preserved on the member cluster. 
Default is false, which means resources will be deleted along with the Work object.", Type: []string{"boolean"}, Format: "", }, @@ -7105,6 +7264,13 @@ func schema_pkg_apis_work_v1alpha2_ResourceBindingSpec(ref common.ReferenceCallb Ref: ref("github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1.Suspension"), }, }, + "preserveResourcesOnDeletion": { + SchemaProps: spec.SchemaProps{ + Description: "PreserveResourcesOnDeletion controls whether resources should be preserved on the member clusters when the binding object is deleted. If set to true, resources will be preserved on the member clusters. Default is false, which means resources will be deleted along with the binding object. This setting applies to all Work objects created under this binding object.", + Type: []string{"boolean"}, + Format: "", + }, + }, }, Required: []string{"resource"}, }, @@ -8320,7 +8486,7 @@ func schema_k8sio_api_admissionregistration_v1_ValidatingAdmissionPolicyBindingS }, }, SchemaProps: spec.SchemaProps{ - Description: "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\"message\": \"Invalid value\", {\"policy\": \"policy.example.com\", {\"binding\": \"policybinding.example.com\", {\"expressionIndex\": \"1\", {\"validationActions\": [\"Audit\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", + Description: "validationActions declares how Validations of the referenced ValidatingAdmissionPolicy are enforced. 
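PropagationSpec, WorkSpec, and ResourceBindingSpec each gain preserveResourcesOnDeletion above, so an intent set once on a policy can travel down through the binding to every Work object. A minimal sketch of a policy that opts in, assuming the field is a *bool as the optional boolean schema suggests:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"

	policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
)

// A policy that keeps propagated resources on member clusters when the
// resource template is deleted, matching the migration/rollback use case
// described in the schema text. The *bool shape is an assumption.
var policy = policyv1alpha1.PropagationPolicy{
	ObjectMeta: metav1.ObjectMeta{Name: "nginx-propagation", Namespace: "default"},
	Spec: policyv1alpha1.PropagationSpec{
		ResourceSelectors: []policyv1alpha1.ResourceSelector{
			{APIVersion: "apps/v1", Kind: "Deployment", Name: "nginx"},
		},
		PreserveResourcesOnDeletion: ptr.To(true),
	},
}

func main() {
	fmt.Println(*policy.Spec.PreserveResourcesOnDeletion) // true
}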
If a validation evaluates to false it is always enforced according to these actions.\n\nFailures defined by the ValidatingAdmissionPolicy's FailurePolicy are enforced according to these actions only if the FailurePolicy is set to Fail, otherwise the failures are ignored. This includes compilation errors, runtime errors and misconfigurations of the policy.\n\nvalidationActions is declared as a set of action values. Order does not matter. validationActions may not contain duplicates of the same action.\n\nThe supported actions values are:\n\n\"Deny\" specifies that a validation failure results in a denied request.\n\n\"Warn\" specifies that a validation failure is reported to the request client in HTTP Warning headers, with a warning code of 299. Warnings can be sent both for allowed or denied admission responses.\n\n\"Audit\" specifies that a validation failure is included in the published audit event for the request. The audit event will contain a `validation.policy.admission.k8s.io/validation_failure` audit annotation with a value containing the details of the validation failures, formatted as a JSON list of objects, each with the following fields: - message: The validation failure message string - policy: The resource name of the ValidatingAdmissionPolicy - binding: The resource name of the ValidatingAdmissionPolicyBinding - expressionIndex: The index of the failed validations in the ValidatingAdmissionPolicy - validationActions: The enforcement actions enacted for the validation failure Example audit annotation: `\"validation.policy.admission.k8s.io/validation_failure\": \"[{\\\"message\\\": \\\"Invalid value\\\", {\\\"policy\\\": \\\"policy.example.com\\\", {\\\"binding\\\": \\\"policybinding.example.com\\\", {\\\"expressionIndex\\\": \\\"1\\\", {\\\"validationActions\\\": [\\\"Audit\\\"]}]\"`\n\nClients should expect to handle additional values by ignoring any values not recognized.\n\n\"Deny\" and \"Warn\" may not be used together since this combination needlessly duplicates the validation failure both in the API response body and the HTTP warning headers.\n\nRequired.", Type: []string{"array"}, Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ @@ -18611,7 +18777,7 @@ func schema_k8sio_api_core_v1_PodSpec(ref common.ReferenceCallback) common.OpenA }, "setHostnameAsFQDN": { SchemaProps: spec.SchemaProps{ - Description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\Tcpip\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. Default to false.", + Description: "If true the pod's hostname will be configured as the pod's FQDN, rather than the leaf name (the default). In Linux containers, this means setting the FQDN in the hostname field of the kernel (the nodename field of struct utsname). In Windows containers, this means setting the registry value of hostname for the registry key HKEY_LOCAL_MACHINE\\\\SYSTEM\\\\CurrentControlSet\\\\Services\\\\Tcpip\\\\Parameters to FQDN. If a pod does not have FQDN, this has no effect. 
Default to false.", Type: []string{"boolean"}, Format: "", }, @@ -25059,7 +25225,7 @@ func schema_pkg_apis_apiextensions_v1_JSONSchemaProps(ref common.ReferenceCallba }, "format": { SchemaProps: spec.SchemaProps{ - Description: "format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:\n\n- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. - ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11})$ with any non digit characters mixed in - ssn: a U.S. social security number following the regex ^\\d{3}[- ]?\\d{2}[- ]?\\d{4}$ - hexcolor: an hexadecimal color code like \"#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like \"rgb(255,255,2559\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339.", + Description: "format is an OpenAPI v3 format string. Unknown formats are ignored. The following formats are validated:\n\n- bsonobjectid: a bson object ID, i.e. a 24 characters hex string - uri: an URI as parsed by Golang net/url.ParseRequestURI - email: an email address as parsed by Golang net/mail.ParseAddress - hostname: a valid representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034]. 
- ipv4: an IPv4 IP as parsed by Golang net.ParseIP - ipv6: an IPv6 IP as parsed by Golang net.ParseIP - cidr: a CIDR as parsed by Golang net.ParseCIDR - mac: a MAC address as parsed by Golang net.ParseMAC - uuid: an UUID that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid3: an UUID3 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?3[0-9a-f]{3}-?[0-9a-f]{4}-?[0-9a-f]{12}$ - uuid4: an UUID4 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?4[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - uuid5: an UUID5 that allows uppercase defined by the regex (?i)^[0-9a-f]{8}-?[0-9a-f]{4}-?5[0-9a-f]{3}-?[89ab][0-9a-f]{3}-?[0-9a-f]{12}$ - isbn: an ISBN10 or ISBN13 number string like \"0321751043\" or \"978-0321751041\" - isbn10: an ISBN10 number string like \"0321751043\" - isbn13: an ISBN13 number string like \"978-0321751041\" - creditcard: a credit card number defined by the regex ^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\\\d{3})\\\\d{11})$ with any non digit characters mixed in - ssn: a U.S. social security number following the regex ^\\\\d{3}[- ]?\\\\d{2}[- ]?\\\\d{4}$ - hexcolor: an hexadecimal color code like \"#FFFFFF: following the regex ^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$ - rgbcolor: an RGB color code like rgb like \"rgb(255,255,2559\" - byte: base64 encoded binary data - password: any kind of string - date: a date string like \"2006-01-02\" as defined by full-date in RFC3339 - duration: a duration string like \"22 ns\" as parsed by Golang time.ParseDuration or compatible with Scala duration format - datetime: a date time string like \"2014-12-15T19:30:20.000Z\" as defined by date-time in RFC3339.", Type: []string{"string"}, Format: "", }, diff --git a/pkg/karmadactl/addons/descheduler/descheduler.go b/pkg/karmadactl/addons/descheduler/descheduler.go index 3d76f6058a58..b6c455212d40 100644 --- a/pkg/karmadactl/addons/descheduler/descheduler.go +++ b/pkg/karmadactl/addons/descheduler/descheduler.go @@ -76,7 +76,7 @@ var enableDescheduler = func(opts *addoninit.CommandAddonsEnableOption) error { return fmt.Errorf("create karmada descheduler deployment error: %v", err) } - if err := cmdutil.WaitForDeploymentRollout(opts.KubeClientSet, karmadaDeschedulerDeployment, opts.WaitComponentReadyTimeout); err != nil { + if err := addonutils.WaitForDeploymentRollout(opts.KubeClientSet, karmadaDeschedulerDeployment, opts.WaitComponentReadyTimeout); err != nil { return fmt.Errorf("wait karmada descheduler pod timeout: %v", err) } diff --git a/pkg/karmadactl/addons/descheduler/descheduler_test.go b/pkg/karmadactl/addons/descheduler/descheduler_test.go new file mode 100644 index 000000000000..d27d9181c8c2 --- /dev/null +++ b/pkg/karmadactl/addons/descheduler/descheduler_test.go @@ -0,0 +1,265 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
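The descheduler enable path above (and the estimator and search paths later in this diff) now calls addonutils.WaitForDeploymentRollout, which helpers.go defines as a package-level function variable rather than a plain function. That indirection is the test seam the new unit tests rely on. A minimal sketch of the pattern, with hypothetical names (rolloutWait, enable):

package addon

import "fmt"

// rolloutWait is a package-level seam: production code calls it, and unit
// tests reassign it to a stub so no API server or polling loop is needed.
var rolloutWait = func(deploymentName string) error {
	// The real implementation would poll until the rollout completes.
	return fmt.Errorf("cannot reach the cluster for %s in unit tests", deploymentName)
}

func enable(deploymentName string) error {
	// ... create or update the Deployment here ...
	return rolloutWait(deploymentName)
}

// In a test:
//
//	rolloutWait = func(string) error { return nil } // stub out the polling
//	if err := enable("karmada-descheduler"); err != nil {
//		t.Fatal(err)
//	}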
+*/ + +package descheduler + +import ( + "context" + "fmt" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + clientsetscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" + + addoninit "github.com/karmada-io/karmada/pkg/karmadactl/addons/init" + addonutils "github.com/karmada-io/karmada/pkg/karmadactl/addons/utils" + cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" +) + +func TestStatus(t *testing.T) { + name, namespace := addoninit.DeschedulerResourceName, "test" + var replicas int32 = 2 + tests := []struct { + name string + listOpts *addoninit.CommandAddonsListOption + prep func(*addoninit.CommandAddonsListOption) error + wantStatus string + wantErr bool + errMsg string + }{ + { + name: "Status_WithoutDescheduler_AddonDisabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(*addoninit.CommandAddonsListOption) error { return nil }, + wantStatus: addoninit.AddonDisabledStatus, + }, + { + name: "Status_WithNetworkIssue_AddonUnknownStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + return addonutils.SimulateNetworkErrorOnOp(listOpts.KubeClientSet, "get", "deployments") + }, + wantStatus: addoninit.AddonUnknownStatus, + wantErr: true, + errMsg: "unexpected error: encountered a network issue while get the deployments", + }, + { + name: "Status_ForKarmadaDeschedulerNotFullyAvailable_AddonUnhealthyStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaDeschedulerDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada descheduler deployment, got error: %v", err) + } + return addonutils.SimulateDeploymentUnready(listOpts.KubeClientSet, name, listOpts.Namespace) + }, + wantStatus: addoninit.AddonUnhealthyStatus, + }, + { + name: "Status_WithAvailableKarmadaDeschedulerDeployment_AddonEnabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaDeschedulerDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada descheduler deployment, got error: %v", err) + } + return nil + }, + wantStatus: addoninit.AddonEnabledStatus, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.listOpts); err != nil { + t.Fatalf("failed to prep test env before checking on karmada descheduler addon status, got error: %v", err) + } + deschedulerAddonStatus, err := status(test.listOpts) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Fatalf("unexpected error, 
got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if deschedulerAddonStatus != test.wantStatus { + t.Errorf("expected addon status to be %s, but got %s", test.wantStatus, deschedulerAddonStatus) + } + }) + } +} + +func TestEnableDescheduler(t *testing.T) { + name, namespace := addoninit.DeschedulerResourceName, "test" + var replicas int32 = 2 + tests := []struct { + name string + enableOpts *addoninit.CommandAddonsEnableOption + prep func() error + wantErr bool + errMsg string + }{ + { + name: "EnableDescheduler_WaitingForKarmadaDescheduler_Created", + enableOpts: &addoninit.CommandAddonsEnableOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + KarmadaDeschedulerReplicas: replicas, + }, + prep: func() error { + addonutils.WaitForDeploymentRollout = func(client clientset.Interface, _ *appsv1.Deployment, _ int) error { + _, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %s, got an error: %v", name, err) + } + return nil + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(); err != nil { + t.Fatalf("failed to prep test environment before enabling descheduler, got an error: %v", err) + } + err := enableDescheduler(test.enableOpts) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + }) + } +} + +func TestDisableDescheduler(t *testing.T) { + name, namespace := addoninit.DeschedulerResourceName, "test" + client := fakeclientset.NewSimpleClientset() + var replicas int32 = 2 + tests := []struct { + name string + enableOpts *addoninit.CommandAddonsEnableOption + disableOpts *addoninit.CommandAddonsDisableOption + prep func(*addoninit.CommandAddonsEnableOption) error + verify func(clientset.Interface) error + wantErr bool + errMsg string + }{ + { + name: "DisableDescheduler_DisablingKarmadaDescheduler_Disabled", + enableOpts: &addoninit.CommandAddonsEnableOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: client, + }, + KarmadaDeschedulerReplicas: replicas, + }, + disableOpts: &addoninit.CommandAddonsDisableOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: client, + }, + }, + prep: func(enableOpts *addoninit.CommandAddonsEnableOption) error { + addonutils.WaitForDeploymentRollout = func(client clientset.Interface, _ *appsv1.Deployment, _ int) error { + _, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %s, got an error: %v", name, err) + } + return nil + } + if err := enableDescheduler(enableOpts); err != nil { + return fmt.Errorf("failed to enable descheduler, got an error: %v", err) + } + return nil + }, + verify: func(client clientset.Interface) error { + _, err := client.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return fmt.Errorf("deployment %s was expected to be deleted, but it was still found", name) + } + return nil + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + if err := test.prep(test.enableOpts); err != nil { + t.Fatalf("failed to prep test environment before disabling descheduler, got an error: %v", err) + } + err := disableDescheduler(test.disableOpts) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err := test.verify(client); err != nil { + t.Errorf("failed to verify disabling descheduler, got an error: %v", err) + } + }) + } +} + +// createKarmadaDeschedulerDeployment creates or updates a Deployment for the Karmada descheduler +// in the specified namespace with the provided number of replicas. +// It parses and decodes the template for the Deployment before applying it to the cluster. +func createKarmadaDeschedulerDeployment(c clientset.Interface, replicas int32, namespace string) error { + karmadaDeschedulerDeploymentBytes, err := addonutils.ParseTemplate(karmadaDeschedulerDeployment, DeploymentReplace{ + Namespace: namespace, + Replicas: ptr.To[int32](replicas), + }) + if err != nil { + return fmt.Errorf("error when parsing karmada descheduler deployment template: %v", err) + } + + karmadaDeschedulerDeployment := &appsv1.Deployment{} + if err = kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), karmadaDeschedulerDeploymentBytes, karmadaDeschedulerDeployment); err != nil { + return fmt.Errorf("failed to decode karmada descheduler deployment, got error: %v", err) + } + if err = cmdutil.CreateOrUpdateDeployment(c, karmadaDeschedulerDeployment); err != nil { + return fmt.Errorf("failed to create karmada descheduler deployment, got error: %v", err) + } + return nil +} diff --git a/pkg/karmadactl/addons/descheduler/manifests.go b/pkg/karmadactl/addons/descheduler/manifests.go index 5b54f25ce098..59e5755741ae 100644 --- a/pkg/karmadactl/addons/descheduler/manifests.go +++ b/pkg/karmadactl/addons/descheduler/manifests.go @@ -45,8 +45,8 @@ spec: command: - /bin/karmada-descheduler - --kubeconfig=/etc/kubeconfig - - --bind-address=0.0.0.0 - - --secure-port=10358 + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10358 - --leader-elect-resource-namespace={{ .Namespace }} - --scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt - --scheduler-estimator-cert-file=/etc/karmada/pki/karmada.crt @@ -62,7 +62,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10358 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/pkg/karmadactl/addons/estimator/estimator.go b/pkg/karmadactl/addons/estimator/estimator.go index 00f88cdf8eac..449385452cc7 100644 --- a/pkg/karmadactl/addons/estimator/estimator.go +++ b/pkg/karmadactl/addons/estimator/estimator.go @@ -127,7 +127,7 @@ var enableEstimator = func(opts *addoninit.CommandAddonsEnableOption) error { return fmt.Errorf("create or update scheduler estimator deployment error: %v", err) } - if err := cmdutil.WaitForDeploymentRollout(opts.KubeClientSet, karmadaEstimatorDeployment, opts.WaitComponentReadyTimeout); err != nil { + if err := addonutils.WaitForDeploymentRollout(opts.KubeClientSet, karmadaEstimatorDeployment, opts.WaitComponentReadyTimeout); err != nil { klog.Warning(err) } klog.Infof("Karmada scheduler estimator of member cluster %s is installed successfully.", opts.Cluster) diff --git a/pkg/karmadactl/addons/estimator/manifests.go b/pkg/karmadactl/addons/estimator/manifests.go index a640fbc0446f..1ee0e17f2ebd 100644 --- a/pkg/karmadactl/addons/estimator/manifests.go +++ 
b/pkg/karmadactl/addons/estimator/manifests.go @@ -48,12 +48,11 @@ spec: - /bin/karmada-scheduler-estimator - --kubeconfig=/etc/{{ .MemberClusterName}}-kubeconfig - --cluster-name={{ .MemberClusterName}} - - --bind-address=0.0.0.0 - - --secure-port=10351 - --grpc-auth-cert-file=/etc/karmada/pki/karmada.crt - --grpc-auth-key-file=/etc/karmada/pki/karmada.key - - --client-cert-auth=true - --grpc-client-ca-file=/etc/karmada/pki/ca.crt + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 livenessProbe: httpGet: path: /healthz @@ -64,7 +63,7 @@ spec: periodSeconds: 15 timeoutSeconds: 5 ports: - - containerPort: 10351 + - containerPort: 8080 name: metrics protocol: TCP volumeMounts: diff --git a/pkg/karmadactl/addons/init/global.go b/pkg/karmadactl/addons/init/global.go index cb9ef6be54ad..a5107634c400 100644 --- a/pkg/karmadactl/addons/init/global.go +++ b/pkg/karmadactl/addons/init/global.go @@ -43,11 +43,11 @@ type GlobalCommandOptions struct { // Cluster holds the name of member cluster to enable or disable scheduler estimator Cluster string - KubeClientSet *kubernetes.Clientset + KubeClientSet kubernetes.Interface KarmadaRestConfig *rest.Config - KarmadaAggregatorClientSet *aggregator.Clientset + KarmadaAggregatorClientSet aggregator.Interface } // AddFlags adds flags to the specified FlagSet. diff --git a/pkg/karmadactl/addons/metricsadapter/metricsadapter.go b/pkg/karmadactl/addons/metricsadapter/metricsadapter.go index 49221e35f578..ed82e3d005a7 100644 --- a/pkg/karmadactl/addons/metricsadapter/metricsadapter.go +++ b/pkg/karmadactl/addons/metricsadapter/metricsadapter.go @@ -169,7 +169,7 @@ func installComponentsOnHostCluster(opts *addoninit.CommandAddonsEnableOption) e return fmt.Errorf("create karmada metrics adapter deployment error: %v", err) } - if err = cmdutil.WaitForDeploymentRollout(opts.KubeClientSet, karmadaMetricsAdapterDeployment, opts.WaitComponentReadyTimeout); err != nil { + if err = addonutils.WaitForDeploymentRollout(opts.KubeClientSet, karmadaMetricsAdapterDeployment, opts.WaitComponentReadyTimeout); err != nil { return fmt.Errorf("wait karmada metrics adapter pod status ready timeout: %v", err) } diff --git a/pkg/karmadactl/addons/metricsadapter/metricsadapter_test.go b/pkg/karmadactl/addons/metricsadapter/metricsadapter_test.go new file mode 100644 index 000000000000..ed152b716534 --- /dev/null +++ b/pkg/karmadactl/addons/metricsadapter/metricsadapter_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
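The global.go hunk above widens GlobalCommandOptions to kubernetes.Interface and aggregator.Interface instead of the concrete clientsets, which is what lets every new addon test inject fake clients. A minimal sketch of why coding against the interface pays off; countDeployments is a hypothetical function:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// countDeployments works against the interface, so it accepts both a real
// *kubernetes.Clientset and the fake clientset used in tests.
func countDeployments(c kubernetes.Interface, namespace string) (int, error) {
	list, err := c.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}

func main() {
	n, _ := countDeployments(fake.NewSimpleClientset(), "test")
	fmt.Println(n) // 0: the fake cluster starts empty
}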
+*/ + +package metricsadapter + +import ( + "context" + "fmt" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + clientsetscheme "k8s.io/client-go/kubernetes/scheme" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + fakeAggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" + "k8s.io/utils/ptr" + + addoninit "github.com/karmada-io/karmada/pkg/karmadactl/addons/init" + addonutils "github.com/karmada-io/karmada/pkg/karmadactl/addons/utils" + cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" +) + +func TestStatus(t *testing.T) { + name, namespace := addoninit.MetricsAdapterResourceName, "test" + var replicas int32 = 2 + tests := []struct { + name string + listOpts *addoninit.CommandAddonsListOption + prep func(*addoninit.CommandAddonsListOption) error + wantStatus string + wantErr bool + errMsg string + }{ + { + name: "Status_WithoutKarmadaMetricsAdapter_AddonDisabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(*addoninit.CommandAddonsListOption) error { return nil }, + wantStatus: addoninit.AddonDisabledStatus, + }, + { + name: "Status_WithNetworkIssue_AddonUnknownStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + return addonutils.SimulateNetworkErrorOnOp(listOpts.KubeClientSet, "get", "deployments") + }, + wantStatus: addoninit.AddonUnknownStatus, + wantErr: true, + errMsg: "unexpected error: encountered a network issue while get the deployments", + }, + { + name: "Status_ForKarmadaMetricsAdapterNotFullyAvailable_AddonUnhealthyStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaMetricsDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada metrics deployment, got error: %v", err) + } + return addonutils.SimulateDeploymentUnready(listOpts.KubeClientSet, name, listOpts.Namespace) + }, + wantStatus: addoninit.AddonUnhealthyStatus, + }, + { + name: "Status_WithoutAAAPIService_AddonDisabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + return createKarmadaMetricsDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace) + }, + wantStatus: addoninit.AddonDisabledStatus, + }, + { + name: "Status_WithoutAvailableAPIService_AddonUnhealthyStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + 
KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaMetricsDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada metrics deployment, got error: %v", err) + } + + if _, err := createAAAPIServices(listOpts.KarmadaAggregatorClientSet); err != nil { + return err + } + + return nil + }, + wantStatus: addoninit.AddonUnhealthyStatus, + }, + { + name: "Status_WithAllAPIServicesAreAvailable_AddonEnabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaMetricsDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada metrics deployment, got error: %v", err) + } + return createAndMarkAAAPIServicesAvailable(listOpts.KarmadaAggregatorClientSet) + }, + wantStatus: addoninit.AddonEnabledStatus, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.listOpts); err != nil { + t.Fatalf("failed to prep test env before checking on karmada addon statuses, got error: %v", err) + } + addonStatus, err := status(test.listOpts) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Fatalf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if addonStatus != test.wantStatus { + t.Errorf("expected addon status to be %s, but got %s", test.wantStatus, addonStatus) + } + }) + } +} + +// createKarmadaMetricsDeployment creates or updates a Deployment for the Karmada metrics adapter +// in the specified namespace with the provided number of replicas. +// It parses and decodes the template for the Deployment before applying it to the cluster. +func createKarmadaMetricsDeployment(c clientset.Interface, replicas int32, namespace string) error { + karmadaMetricsAdapterDeploymentBytes, err := addonutils.ParseTemplate(karmadaMetricsAdapterDeployment, DeploymentReplace{ + Namespace: namespace, + Replicas: ptr.To[int32](replicas), + }) + if err != nil { + return fmt.Errorf("error when parsing karmada metrics adapter deployment template :%v", err) + } + + karmadaMetricsAdapterDeployment := &appsv1.Deployment{} + if err = kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), karmadaMetricsAdapterDeploymentBytes, karmadaMetricsAdapterDeployment); err != nil { + return fmt.Errorf("decode karmada metrics adapter deployment error: %v", err) + } + if err = cmdutil.CreateOrUpdateDeployment(c, karmadaMetricsAdapterDeployment); err != nil { + return fmt.Errorf("create karmada metrics adapter deployment error: %v", err) + } + return nil +} + +// createAAAPIServices creates a set of APIService resources for the specified AA API services +// using the provided aggregator client. It returns a list of created APIService objects or an error if creation fails. 
+func createAAAPIServices(a aggregator.Interface) ([]*apiregistrationv1.APIService, error) { + var services []*apiregistrationv1.APIService + for _, aaAPIService := range aaAPIServices { + apiServiceCreated, err := a.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{ + Name: aaAPIService, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create api service, got error: %v", err) + } + services = append(services, apiServiceCreated) + } + return services, nil +} + +// updateAAAPIServicesCondition updates the specified condition type and status +// for each APIService in the provided list using the aggregator client. +// This helps set conditions such as Availability for API services. +func updateAAAPIServicesCondition(services []*apiregistrationv1.APIService, a aggregator.Interface, + conditionType apiregistrationv1.APIServiceConditionType, conditionStatus apiregistrationv1.ConditionStatus) error { + for _, service := range services { + service.Status.Conditions = []apiregistrationv1.APIServiceCondition{ + { + Type: conditionType, + Status: conditionStatus, + }, + } + _, err := a.ApiregistrationV1().APIServices().UpdateStatus(context.TODO(), service, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update status of apiservice, got error: %v", err) + } + } + return nil +} + +// createAndMarkAAAPIServicesAvailable creates the specified AA API services and then +// updates their conditions to mark them as available, setting a "ConditionTrue" status. +// This function is a combination of the creation and condition-setting operations for convenience. +func createAndMarkAAAPIServicesAvailable(a aggregator.Interface) error { + var aaAPIServicesCreated []*apiregistrationv1.APIService + aaAPIServicesCreated, err := createAAAPIServices(a) + if err != nil { + return err + } + + return updateAAAPIServicesCondition( + aaAPIServicesCreated, a, apiregistrationv1.Available, + apiregistrationv1.ConditionTrue, + ) +} diff --git a/pkg/karmadactl/addons/search/search.go b/pkg/karmadactl/addons/search/search.go index 7b9fef39dde3..5b313a82e6f9 100644 --- a/pkg/karmadactl/addons/search/search.go +++ b/pkg/karmadactl/addons/search/search.go @@ -182,7 +182,7 @@ func installComponentsOnHostCluster(opts *addoninit.CommandAddonsEnableOption) e return fmt.Errorf("create karmada search deployment error: %v", err) } - if err := cmdutil.WaitForDeploymentRollout(opts.KubeClientSet, karmadaSearchDeployment, opts.WaitComponentReadyTimeout); err != nil { + if err := addonutils.WaitForDeploymentRollout(opts.KubeClientSet, karmadaSearchDeployment, opts.WaitComponentReadyTimeout); err != nil { return fmt.Errorf("wait karmada search pod status ready timeout: %v", err) } diff --git a/pkg/karmadactl/addons/search/search_test.go b/pkg/karmadactl/addons/search/search_test.go new file mode 100644 index 000000000000..72123f50c55f --- /dev/null +++ b/pkg/karmadactl/addons/search/search_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
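The metrics-adapter status tests above pin down three outcomes: no APIService means disabled, an APIService without an Available condition means unhealthy, and Available conditions on all AA APIServices mean enabled. The status() implementation itself is not part of this diff, so the following is only a sketch of the kind of check those tests imply; apiServicesAvailable and its package name are illustrative:

package addoncheck

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
)

// apiServicesAvailable reports whether every named APIService carries an
// Available=True condition, the same signal the tests above toggle via
// UpdateStatus on the fake aggregator client.
func apiServicesAvailable(a aggregator.Interface, names []string) (bool, error) {
	for _, name := range names {
		svc, err := a.ApiregistrationV1().APIServices().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		available := false
		for _, cond := range svc.Status.Conditions {
			if cond.Type == apiregistrationv1.Available && cond.Status == apiregistrationv1.ConditionTrue {
				available = true
				break
			}
		}
		if !available {
			return false, nil
		}
	}
	return true, nil
}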
+See the License for the specific language governing permissions and +limitations under the License. +*/ +package search + +import ( + "context" + "fmt" + "strings" + "testing" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kuberuntime "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + clientsetscheme "k8s.io/client-go/kubernetes/scheme" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + fakeAggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" + "k8s.io/utils/ptr" + + addoninit "github.com/karmada-io/karmada/pkg/karmadactl/addons/init" + addonutils "github.com/karmada-io/karmada/pkg/karmadactl/addons/utils" + cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" +) + +func TestKarmadaSearchAddonStatus(t *testing.T) { + name, namespace := addoninit.SearchResourceName, "test" + var replicas int32 = 2 + tests := []struct { + name string + listOpts *addoninit.CommandAddonsListOption + prep func(*addoninit.CommandAddonsListOption) error + wantErr bool + wantStatus string + errMsg string + }{ + { + name: "Status_WithoutKarmadaSearch_AddonDisabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(*addoninit.CommandAddonsListOption) error { return nil }, + wantStatus: addoninit.AddonDisabledStatus, + }, + { + name: "Status_WithNetworkIssue_AddonUnknownStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + return addonutils.SimulateNetworkErrorOnOp(listOpts.KubeClientSet, "get", "deployments") + }, + wantStatus: addoninit.AddonUnknownStatus, + wantErr: true, + errMsg: "unexpected error: encountered a network issue while get the deployments", + }, + { + name: "Status_WithKarmadaSearchNotFullyAvailable_AddonUnhealthyStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaSearchDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada search deployment, got error: %v", err) + } + return addonutils.SimulateDeploymentUnready(listOpts.KubeClientSet, name, listOpts.Namespace) + }, + wantStatus: addoninit.AddonUnhealthyStatus, + wantErr: false, + }, + { + name: "Status_WithoutAAAPIServiceOnKarmadaControlplane_AddonDisabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + return createKarmadaSearchDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace) + }, + wantStatus: addoninit.AddonDisabledStatus, + }, + { + name: "Status_WithoutAvailableAAAPIServiceServiceOnKarmadaControlPlane_AddonUnhealthyStatus", + listOpts: &addoninit.CommandAddonsListOption{ + 
GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaSearchDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada search deployment, got error: %v", err) + } + + if _, err := createAAAPIService(listOpts.KarmadaAggregatorClientSet); err != nil { + return err + } + + return nil + }, + wantStatus: addoninit.AddonUnhealthyStatus, + }, + { + name: "Status_WithAllAPIServicesAreAvailable_AddonEnabledStatus", + listOpts: &addoninit.CommandAddonsListOption{ + GlobalCommandOptions: addoninit.GlobalCommandOptions{ + Namespace: namespace, + KubeClientSet: fakeclientset.NewSimpleClientset(), + KarmadaAggregatorClientSet: fakeAggregator.NewSimpleClientset(), + }, + }, + prep: func(listOpts *addoninit.CommandAddonsListOption) error { + if err := createKarmadaSearchDeployment(listOpts.KubeClientSet, replicas, listOpts.Namespace); err != nil { + return fmt.Errorf("failed to create karmada search deployment, got error: %v", err) + } + return createAndMarkAAAPIServiceAvailable(listOpts.KarmadaAggregatorClientSet) + }, + wantStatus: addoninit.AddonEnabledStatus, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.listOpts); err != nil { + t.Fatalf("failed to prep test environment before getting status of karmada search, got: %v", err) + } + searchAddonStatus, err := status(test.listOpts) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Fatalf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + if searchAddonStatus != test.wantStatus { + t.Errorf("expected karmada search addon status to be %s, but got %s", test.wantStatus, searchAddonStatus) + } + }) + } +} + +// createKarmadaSearchDeployment creates or updates a Deployment for the Karmada search deployment +// in the specified namespace with the provided number of replicas. +// It parses and decodes the template for the Deployment before applying it to the cluster. +func createKarmadaSearchDeployment(c clientset.Interface, replicas int32, namespace string) error { + karmadaSearchDeploymentBytes, err := addonutils.ParseTemplate(karmadaSearchDeployment, DeploymentReplace{ + Namespace: namespace, + Replicas: ptr.To(replicas), + }) + if err != nil { + return fmt.Errorf("error when parsing karmada search deployment template :%v", err) + } + + karmadaSearchDeployment := &appsv1.Deployment{} + if err = kuberuntime.DecodeInto(clientsetscheme.Codecs.UniversalDecoder(), karmadaSearchDeploymentBytes, karmadaSearchDeployment); err != nil { + return fmt.Errorf("decode karmada search deployment error: %v", err) + } + if err = cmdutil.CreateOrUpdateDeployment(c, karmadaSearchDeployment); err != nil { + return fmt.Errorf("create karmada search deployment error: %v", err) + } + return nil +} + +// createAAAPIService creates a single APIService resource for the specified AA API +// using the provided aggregator client. It returns the created APIService object or an error +// if the creation fails. 
+func createAAAPIService(a aggregator.Interface) (*apiregistrationv1.APIService, error) { + apiServiceCreated, err := a.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{ + Name: aaAPIServiceName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create api service, got error: %v", err) + } + return apiServiceCreated, nil +} + +// createAndMarkAAAPIServiceAvailable creates the specified AA APIService and then +// updates its condition status to "Available" by setting the condition status to "ConditionTrue". +// This function simplifies the combined process of creation and availability marking. +func createAndMarkAAAPIServiceAvailable(a aggregator.Interface) error { + aaAPIServerCreated, err := createAAAPIService(a) + if err != nil { + return err + } + + return updateAAAPIServiceCondition( + aaAPIServerCreated, a, apiregistrationv1.Available, + apiregistrationv1.ConditionTrue, + ) +} + +// updateAAAPIServiceCondition updates the specified condition type and status +// for the provided APIService resource using the aggregator client. +// This function sets conditions like "Available" on the APIService to reflect its current state. +func updateAAAPIServiceCondition(service *apiregistrationv1.APIService, a aggregator.Interface, + conditionType apiregistrationv1.APIServiceConditionType, conditionStatus apiregistrationv1.ConditionStatus) error { + service.Status.Conditions = []apiregistrationv1.APIServiceCondition{ + { + Type: conditionType, + Status: conditionStatus, + }, + } + _, err := a.ApiregistrationV1().APIServices().UpdateStatus(context.TODO(), service, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update status of apiservice, got error: %v", err) + } + return nil +} diff --git a/pkg/karmadactl/addons/utils/helpers.go b/pkg/karmadactl/addons/utils/helpers.go new file mode 100644 index 000000000000..bb0a79b4546a --- /dev/null +++ b/pkg/karmadactl/addons/utils/helpers.go @@ -0,0 +1,67 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "context" + "fmt" + + appsv1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + coretesting "k8s.io/client-go/testing" + + cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" +) + +var ( + // WaitForDeploymentRollout waits for the specified Deployment to reach its desired state within the given timeout. + // This blocks until the Deployment's observed generation and ready replicas match the desired state, + // ensuring it is fully rolled out. 
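+	// It is exposed as a package-level variable rather than called directly,
+	// presumably so tests can swap in a stub, e.g.:
+	//
+	//	addonutils.WaitForDeploymentRollout = func(clientset.Interface, *appsv1.Deployment, int) error { return nil }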
+ WaitForDeploymentRollout = func(c clientset.Interface, dep *appsv1.Deployment, timeoutSeconds int) error { + return cmdutil.WaitForDeploymentRollout(c, dep, timeoutSeconds) + } +) + +// SimulateNetworkErrorOnOp simulates a network error during the specified +// operation on a resource by prepending a reactor to the fake client. +func SimulateNetworkErrorOnOp(c clientset.Interface, operation, resource string) error { + c.(*fakeclientset.Clientset).Fake.PrependReactor(operation, resource, func(coretesting.Action) (bool, runtime.Object, error) { + return true, nil, fmt.Errorf("unexpected error: encountered a network issue while %s the %s", operation, resource) + }) + return nil +} + +// SimulateDeploymentUnready simulates a "not ready" status by incrementing the replicas +// of the specified Deployment, thus marking it as unready. This is useful for testing the handling +// of Deployment readiness in Karmada. +func SimulateDeploymentUnready(c clientset.Interface, name, namespace string) error { + deployment, err := c.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %s in namespace %s, got error: %v", name, namespace, err) + } + + deployment.Status.Replicas = *deployment.Spec.Replicas + 1 + _, err = c.AppsV1().Deployments(namespace).UpdateStatus(context.TODO(), deployment, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update replicas status of deployment %s in namespace %s, got error: %v", name, namespace, err) + } + + return nil +} diff --git a/pkg/karmadactl/annotate/annotate.go b/pkg/karmadactl/annotate/annotate.go new file mode 100644 index 000000000000..003da8270d38 --- /dev/null +++ b/pkg/karmadactl/annotate/annotate.go @@ -0,0 +1,64 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package annotate
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	kubectlannotate "k8s.io/kubectl/pkg/cmd/annotate"
+	"k8s.io/kubectl/pkg/util/templates"
+
+	"github.com/karmada-io/karmada/pkg/karmadactl/options"
+	"github.com/karmada-io/karmada/pkg/karmadactl/util"
+	utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion"
+)
+
+var (
+	annotateExample = templates.Examples(`
+	# Update deployment 'foo' with the annotation 'work.karmada.io/conflict-resolution' and the value 'overwrite'
+	# If the same annotation is set multiple times, only the last value will be applied
+	%[1]s annotate deployment foo work.karmada.io/conflict-resolution='overwrite'
+
+	# Update a deployment identified by type and name in "deployment.json"
+	%[1]s annotate -f deployment.json work.karmada.io/conflict-resolution='overwrite'
+
+	# Update deployment 'foo' with the annotation 'work.karmada.io/conflict-resolution' and the value 'abort', overwriting any existing value
+	%[1]s annotate --overwrite deployment foo work.karmada.io/conflict-resolution='abort'
+
+	# Update all deployments in the namespace
+	%[1]s annotate deployment --all work.karmada.io/conflict-resolution='abort'
+
+	# Update deployment 'foo' only if the resource is unchanged from version 1
+	%[1]s annotate deployment foo work.karmada.io/conflict-resolution='abort' --resource-version=1
+
+	# Update deployment 'foo' by removing an annotation named 'work.karmada.io/conflict-resolution' if it exists
+	# Does not require the --overwrite flag
+	%[1]s annotate deployment foo work.karmada.io/conflict-resolution-`)
+)
+
+// NewCmdAnnotate returns a new initialized instance of the annotate sub command
+func NewCmdAnnotate(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	cmd := kubectlannotate.NewCmdAnnotate(parentCommand, f, ioStreams)
+	cmd.Example = fmt.Sprintf(annotateExample, parentCommand)
+	cmd.Annotations = map[string]string{
+		util.TagCommandGroup: util.GroupSettingsCommands,
+	}
+	options.AddKubeConfigFlags(cmd.Flags())
+	options.AddNamespaceFlag(cmd.Flags())
+	utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd)
+	utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f)
+	return cmd
+}
diff --git a/pkg/karmadactl/apiresources/apiresources.go b/pkg/karmadactl/apiresources/apiresources.go
new file mode 100644
index 000000000000..6103882629f1
--- /dev/null
+++ b/pkg/karmadactl/apiresources/apiresources.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package apiresources + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlapiresources "k8s.io/kubectl/pkg/cmd/apiresources" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + apiresourcesExample = templates.Examples(` + # Print the supported API resources in Karmada control plane + %[1]s api-resources + + # Print the supported API resources with more information in cluster(member1) + %[1]s api-resources -o wide --operation-scope=members --cluster=member1 + + # Print the supported API resources sorted by a column in Karmada control plane + %[1]s api-resources --sort-by=name + + # Print the supported namespaced resources in Karmada control plane + %[1]s api-resources --namespaced=true + + # Print the supported non-namespaced resources in Karmada control plane + %[1]s api-resources --namespaced=false + + # Print the supported API resources with a specific APIGroup in Karmada control plane + %[1]s api-resources --api-group=rbac.authorization.k8s.io`) +) + +// NewCmdAPIResources creates the api-resources command +func NewCmdAPIResources(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + var o CommandAPIResourcesOptions + o.APIResourceOptions = kubectlapiresources.NewAPIResourceOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "api-resources", + Short: "Print the supported API resources on the server", + Long: "Print the supported API resources on the server.", + Example: fmt.Sprintf(apiresourcesExample, parentCommand), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunAPIResources()) + }, + Annotations: map[string]string{ + util.TagCommandGroup: util.GroupOtherCommands, + }, + } + + o.OperationScope = options.KarmadaControlPlane + options.AddKubeConfigFlags(cmd.Flags()) + cmd.Flags().VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") + cmd.Flags().StringVar(&o.Cluster, "cluster", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "When using the default or custom-column output format, don't print headers (default print headers).") + cmd.Flags().StringVarP(&o.Output, "output", "o", o.Output, `Output format. One of: (wide, name).`) + + cmd.Flags().StringVar(&o.APIGroup, "api-group", o.APIGroup, "Limit to resources in the specified API group.") + cmd.Flags().BoolVar(&o.Namespaced, "namespaced", o.Namespaced, "If false, non-namespaced resources will be returned, otherwise returning namespaced resources by default.") + cmd.Flags().StringSliceVar(&o.Verbs, "verbs", o.Verbs, "Limit to resources that support the specified verbs.") + cmd.Flags().StringVar(&o.SortBy, "sort-by", o.SortBy, "If non-empty, sort list of resources using specified field. 
The field can be either 'name' or 'kind'.")
+	cmd.Flags().BoolVar(&o.Cached, "cached", o.Cached, "Use the cached list of resources if available.")
+	cmd.Flags().StringSliceVar(&o.Categories, "categories", o.Categories, "Limit to resources that belong to the specified categories.")
+
+	utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd)
+	utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members)
+	utilcomp.RegisterCompletionFuncForClusterFlag(cmd)
+	return cmd
+}
+
+// CommandAPIResourcesOptions contains the input to the api-resources command.
+type CommandAPIResourcesOptions struct {
+	// flags specific to api-resources
+	*kubectlapiresources.APIResourceOptions
+	Cluster        string
+	OperationScope options.OperationScope
+}
+
+// Complete resolves the factory for the requested operation scope and completes
+// the embedded APIResourceOptions from the command line args
+func (o *CommandAPIResourcesOptions) Complete(f util.Factory, cmd *cobra.Command, args []string) error {
+	var apiFactory cmdutil.Factory = f
+	if o.OperationScope == options.Members && len(o.Cluster) != 0 {
+		memberFactory, err := f.FactoryForMemberCluster(o.Cluster)
+		if err != nil {
+			return err
+		}
+		apiFactory = memberFactory
+	}
+	return o.APIResourceOptions.Complete(apiFactory, cmd, args)
+}
+
+// Validate checks the APIResourceOptions to see if there is sufficient information to run the command
+func (o *CommandAPIResourcesOptions) Validate() error {
+	err := options.VerifyOperationScopeFlags(o.OperationScope, options.KarmadaControlPlane, options.Members)
+	if err != nil {
+		return err
+	}
+	if o.OperationScope == options.Members && len(o.Cluster) == 0 {
+		return fmt.Errorf("must specify a member cluster")
+	}
+	return o.APIResourceOptions.Validate()
+}
+
+// Run does the work
+func (o *CommandAPIResourcesOptions) Run() error {
+	return o.APIResourceOptions.RunAPIResources()
+}
diff --git a/pkg/karmadactl/apiresources/apiversions.go b/pkg/karmadactl/apiresources/apiversions.go
new file mode 100644
index 000000000000..61edc3b14af5
--- /dev/null
+++ b/pkg/karmadactl/apiresources/apiversions.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package apiresources + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlapiresources "k8s.io/kubectl/pkg/cmd/apiresources" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + apiversionsExample = templates.Examples(` + # Print the supported API versions + %[1]s api-versions + + # Print the supported API versions in cluster(member1) + %[1]s api-versions --operation-scope=members --cluster=member1`) +) + +// NewCmdAPIVersions creates the api-versions command +func NewCmdAPIVersions(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + var o CommandAPIVersionsOptions + o.APIVersionsOptions = kubectlapiresources.NewAPIVersionsOptions(ioStreams) + cmd := &cobra.Command{ + Use: "api-versions", + Short: "Print the supported API versions on the server, in the form of \"group/version\"", + Long: "Print the supported API versions on the server, in the form of \"group/version\".", + Example: fmt.Sprintf(apiversionsExample, parentCommand), + DisableFlagsInUseLine: true, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunAPIVersions()) + }, + Annotations: map[string]string{ + util.TagCommandGroup: util.GroupOtherCommands, + }, + } + + o.OperationScope = options.KarmadaControlPlane + options.AddKubeConfigFlags(cmd.Flags()) + cmd.Flags().VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") + cmd.Flags().StringVar(&o.Cluster, "cluster", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) + + return cmd +} + +// CommandAPIVersionsOptions contains the input to the api-versions command. 
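+// It embeds kubectl's APIVersionsOptions and adds the Karmada operation scope:
+// with --operation-scope=members and --cluster set, Complete resolves the member
+// cluster's factory instead of the Karmada control plane's.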
+type CommandAPIVersionsOptions struct { + // flags specific to api-versions + *kubectlapiresources.APIVersionsOptions + Cluster string + OperationScope options.OperationScope +} + +// Complete adapts from the command line args and factory to the data required +func (o *CommandAPIVersionsOptions) Complete(f util.Factory, cmd *cobra.Command, args []string) error { + var apiFactory cmdutil.Factory = f + if o.OperationScope == options.Members && len(o.Cluster) != 0 { + memberFactory, err := f.FactoryForMemberCluster(o.Cluster) + if err != nil { + return err + } + apiFactory = memberFactory + } + return o.APIVersionsOptions.Complete(apiFactory, cmd, args) +} + +// Validate checks to the APIVersionsOptions to see if there is sufficient information run the command +func (o *CommandAPIVersionsOptions) Validate() error { + err := options.VerifyOperationScopeFlags(o.OperationScope, options.KarmadaControlPlane, options.Members) + if err != nil { + return err + } + if o.OperationScope == options.Members && len(o.Cluster) == 0 { + return fmt.Errorf("must specify a member cluster") + } + return nil +} + +// Run does the work +func (o *CommandAPIVersionsOptions) Run() error { + return o.APIVersionsOptions.RunAPIVersions() +} diff --git a/pkg/karmadactl/apply/apply.go b/pkg/karmadactl/apply/apply.go index 06163fcc5de4..b990a12aad21 100644 --- a/pkg/karmadactl/apply/apply.go +++ b/pkg/karmadactl/apply/apply.go @@ -35,6 +35,7 @@ import ( karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/util/names" ) @@ -93,6 +94,7 @@ func NewCmdApply(f util.Factory, parentCommand string, streams genericiooptions. SilenceUsage: true, DisableFlagsInUseLine: true, Example: fmt.Sprintf(applyExample, parentCommand), + ValidArgsFunction: utilcomp.ResourceTypeAndNameCompletionFunc(f), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Complete(f, cmd, parentCommand, args); err != nil { return err @@ -110,9 +112,14 @@ func NewCmdApply(f util.Factory, parentCommand string, streams genericiooptions. o.KubectlApplyFlags.AddFlags(cmd) flags := cmd.Flags() options.AddKubeConfigFlags(flags) - flags.StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + options.AddNamespaceFlag(flags) flags.BoolVarP(&o.AllClusters, "all-clusters", "", o.AllClusters, "If present, propagates a group of resources to all member clusters.") flags.StringSliceVarP(&o.Clusters, "cluster", "C", o.Clusters, "If present, propagates a group of resources to specified clusters.") + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) + return cmd } diff --git a/pkg/karmadactl/attach/attach.go b/pkg/karmadactl/attach/attach.go new file mode 100644 index 000000000000..5896b3a31dd8 --- /dev/null +++ b/pkg/karmadactl/attach/attach.go @@ -0,0 +1,132 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package attach + +import ( + "fmt" + "time" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlattach "k8s.io/kubectl/pkg/cmd/attach" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + attachExample = templates.Examples(` + # Get output from running pod mypod in cluster(member1); use the 'kubectl.kubernetes.io/default-container' annotation + # for selecting the container to be attached or the first container in the pod will be chosen + %[1]s attach mypod --cluster=member1 + + # Get output from ruby-container from pod mypod in cluster(member1) + %[1]s attach mypod -c ruby-container --cluster=member1 + + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod in Karmada control plane + # and sends stdout/stderr from 'bash' back to the client + %[1]s attach mypod -c ruby-container -i -t + + # Get output from the first pod of a replica set named nginx in cluster(member1) + %[1]s attach rs/nginx --cluster=member1 + `) +) + +const ( + defaultPodAttachTimeout = 60 * time.Second +) + +// NewCmdAttach new attach command. +func NewCmdAttach(f util.Factory, parentCommand string, streams genericiooptions.IOStreams) *cobra.Command { + var o CommandAttachOptions + o.AttachOptions = kubectlattach.NewAttachOptions(streams) + + cmd := &cobra.Command{ + Use: "attach (POD | TYPE/NAME) -c CONTAINER", + DisableFlagsInUseLine: true, + Short: "Attach to a running container", + Long: "Attach to a process that is already running inside an existing container.", + Example: fmt.Sprintf(attachExample, parentCommand), + ValidArgsFunction: utilcomp.PodResourceNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + Annotations: map[string]string{ + util.TagCommandGroup: util.GroupClusterTroubleshootingAndDebugging, + }, + } + + cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodAttachTimeout) + cmdutil.AddContainerVarFlags(cmd, &o.ContainerName, o.ContainerName) + options.AddKubeConfigFlags(cmd.Flags()) + options.AddNamespaceFlag(cmd.Flags()) + o.OperationScope = options.KarmadaControlPlane + cmd.Flags().BoolVarP(&o.Stdin, "stdin", "i", o.Stdin, "Pass stdin to the container") + cmd.Flags().BoolVarP(&o.TTY, "tty", "t", o.TTY, "Stdin is a TTY") + cmd.Flags().BoolVarP(&o.Quiet, "quiet", "q", o.Quiet, "Only print output from the remote session") + cmd.Flags().VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. 
Defaults to karmada.") + cmd.Flags().StringVar(&o.Cluster, "cluster", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) + return cmd +} + +// CommandAttachOptions declare the arguments accepted by the attach command +type CommandAttachOptions struct { + // flags specific to attach + *kubectlattach.AttachOptions + Cluster string + OperationScope options.OperationScope +} + +// Complete verifies command line arguments and loads data from the command environment +func (o *CommandAttachOptions) Complete(f util.Factory, cmd *cobra.Command, args []string) error { + var attachFactory cmdutil.Factory = f + if o.OperationScope == options.Members && len(o.Cluster) != 0 { + memberFactory, err := f.FactoryForMemberCluster(o.Cluster) + if err != nil { + return err + } + attachFactory = memberFactory + } + return o.AttachOptions.Complete(attachFactory, cmd, args) +} + +// Validate checks that the provided attach options are specified. +func (o *CommandAttachOptions) Validate() error { + err := options.VerifyOperationScopeFlags(o.OperationScope, options.KarmadaControlPlane, options.Members) + if err != nil { + return err + } + if o.OperationScope == options.Members && len(o.Cluster) == 0 { + return fmt.Errorf("must specify a member cluster") + } + return o.AttachOptions.Validate() +} + +// Run executes a validated remote execution against a pod. +func (o *CommandAttachOptions) Run() error { + return o.AttachOptions.Run() +} diff --git a/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap.go b/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap.go index 34f284c3d627..14a8e75ad7c2 100644 --- a/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap.go +++ b/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap.go @@ -45,7 +45,7 @@ const ( ) // AllowBootstrapTokensToPostCSRs creates RBAC rules in a way the makes Karmada Agent Bootstrap Tokens able to post CSRs -func AllowBootstrapTokensToPostCSRs(clientSet *kubernetes.Clientset) error { +func AllowBootstrapTokensToPostCSRs(clientSet kubernetes.Interface) error { klog.Infoln("[bootstrap-token] configured RBAC rules to allow Karmada Agent Bootstrap tokens to post CSRs in order for agent to get long term certificate credentials") clusterRoleBinding := utils.ClusterRoleBindingFromSubjects(KarmadaAgentBootstrap, KarmadaAgentBootstrapperClusterRoleName, @@ -59,7 +59,7 @@ func AllowBootstrapTokensToPostCSRs(clientSet *kubernetes.Clientset) error { } // AutoApproveKarmadaAgentBootstrapTokens creates RBAC rules in a way that makes Karmada Agent Bootstrap Tokens' CSR auto-approved by the csrapprover controller -func AutoApproveKarmadaAgentBootstrapTokens(clientSet *kubernetes.Clientset) error { +func AutoApproveKarmadaAgentBootstrapTokens(clientSet kubernetes.Interface) error { klog.Infoln("[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Karmada Agent Bootstrap Token") clusterRoleBinding := utils.ClusterRoleBindingFromSubjects(KarmadaAgentAutoApproveBootstrapClusterRoleBinding, CSRAutoApprovalClusterRoleName, @@ -73,7 +73,7 @@ func AutoApproveKarmadaAgentBootstrapTokens(clientSet 
*kubernetes.Clientset) err } // AutoApproveAgentCertificateRotation creates RBAC rules in a way that makes Agent certificate rotation CSR auto-approved by the csrapprover controller -func AutoApproveAgentCertificateRotation(clientSet *kubernetes.Clientset) error { +func AutoApproveAgentCertificateRotation(clientSet kubernetes.Interface) error { klog.Infoln("[bootstrap-token] configured RBAC rules to allow certificate rotation for all agent client certificates in the member cluster") clusterRoleBinding := utils.ClusterRoleBindingFromSubjects(KarmadaAgentAutoApproveCertificateRotationClusterRoleBinding, KarmadaAgentSelfCSRAutoApprovalClusterRoleName, diff --git a/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap_test.go b/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap_test.go new file mode 100644 index 000000000000..71ba4a63a6c5 --- /dev/null +++ b/pkg/karmadactl/cmdinit/bootstraptoken/agent/tlsbootstrap_test.go @@ -0,0 +1,178 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package agent + +import ( + "context" + "fmt" + "testing" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + + "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/utils" +) + +func TestAllowBootstrapTokensToPostCSRs(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + prep func(clientset.Interface) error + verify func(clientset.Interface) error + }{ + { + name: "AllowBootstrapTokensToPostCSRs_CreateClusterRoleBinding_Created", + client: fakeclientset.NewSimpleClientset(), + prep: func(clientset.Interface) error { return nil }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentBootstrap, KarmadaAgentBootstrapperClusterRoleName) + }, + }, + { + name: "AllowBootstrapTokensToPostCSRs_ClusterRoleBindingAlreadyExists_Updated", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + return createClusterRoleBinding(client, KarmadaAgentBootstrap, KarmadaAgentBootstrapperClusterRoleName, KarmadaAgentBootstrapTokenAuthGroup) + }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentBootstrap, KarmadaAgentBootstrapperClusterRoleName) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Fatalf("failed to prep before allowing bootstrap tokens to post CSRs, got: %v", err) + } + if err := AllowBootstrapTokensToPostCSRs(test.client); err != nil { + t.Errorf("unexpected error while allowing bootstrap tokens to post CSRs, got: %v", err) + } + if err := test.verify(test.client); err != nil { + t.Errorf("failed to verify the creation of cluster role bindings, got: %v", err) + } + }) + } +} + +func TestAutoApproveKarmadaAgentBootstrapTokens(t *testing.T) { + tests := []struct { + name string + client 
clientset.Interface + prep func(clientset.Interface) error + verify func(clientset.Interface) error + }{ + { + name: "AutoApproveKarmadaAgentBootstrapTokens_CreateClusterRoleBindings_Created", + client: fakeclientset.NewSimpleClientset(), + prep: func(clientset.Interface) error { return nil }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentAutoApproveBootstrapClusterRoleBinding, CSRAutoApprovalClusterRoleName) + }, + }, + { + name: "AutoApproveKarmadaAgentBootstrapTokens_ClusterRoleBindingAlreadyExists_Updated", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + return createClusterRoleBinding(client, KarmadaAgentAutoApproveBootstrapClusterRoleBinding, CSRAutoApprovalClusterRoleName, KarmadaAgentBootstrapTokenAuthGroup) + }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentAutoApproveBootstrapClusterRoleBinding, CSRAutoApprovalClusterRoleName) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Fatalf("failed to prep before auto-approve karmada agent bootstrap tokens, got: %v", err) + } + if err := AutoApproveKarmadaAgentBootstrapTokens(test.client); err != nil { + t.Errorf("unexpected error, got: %v", err) + } + if err := test.verify(test.client); err != nil { + t.Errorf("failed to verify the creation of cluster role bindings, got error: %v", err) + } + }) + } +} + +func TestAutoApproveAgentCertificateRotation(t *testing.T) { + tests := []struct { + name string + client clientset.Interface + prep func(clientset.Interface) error + verify func(clientset.Interface) error + }{ + { + name: "AutoApproveAgentCertificateRotation_CreateClusterRoleBindings_Created", + client: fakeclientset.NewSimpleClientset(), + prep: func(clientset.Interface) error { return nil }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentAutoApproveCertificateRotationClusterRoleBinding, KarmadaAgentSelfCSRAutoApprovalClusterRoleName) + }, + }, + { + name: "AutoApproveAgentCertificateRotation_ClusterRoleBindingAlreadyExists_Updated", + client: fakeclientset.NewSimpleClientset(), + prep: func(client clientset.Interface) error { + return createClusterRoleBinding(client, KarmadaAgentAutoApproveCertificateRotationClusterRoleBinding, KarmadaAgentSelfCSRAutoApprovalClusterRoleName, KarmadaAgentGroup) + }, + verify: func(client clientset.Interface) error { + return verifyClusterRoleBinding(client, KarmadaAgentAutoApproveCertificateRotationClusterRoleBinding, KarmadaAgentSelfCSRAutoApprovalClusterRoleName) + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Fatalf("failed to prep before auto-approve agent certificate rotation, got: %v", err) + } + if err := AutoApproveAgentCertificateRotation(test.client); err != nil { + t.Errorf("unexpected error, got: %v", err) + } + if err := test.verify(test.client); err != nil { + t.Errorf("failed to verify the creation of cluster role bindings, got error: %v", err) + } + }) + } +} + +func createClusterRoleBinding(client clientset.Interface, crbName, crName, subjectName string) error { + clusterRoleBinding := utils.ClusterRoleBindingFromSubjects(crbName, crName, + []rbacv1.Subject{ + { + Kind: rbacv1.GroupKind, + Name: subjectName, + }, + }, nil) + if _, err := 
client.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("failed to create cluster role binding, got: %v", err) + } + return nil +} + +func verifyClusterRoleBinding(client clientset.Interface, crbName, crName string) error { + clusterRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(context.TODO(), crbName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get cluster role binding, got: %v", err) + } + if clusterRoleBinding.RoleRef.Name != crName { + return fmt.Errorf("expected cluster role ref name to be %s, but got %s", crName, clusterRoleBinding.RoleRef.Name) + } + return nil +} diff --git a/pkg/karmadactl/cmdinit/cert/cert.go b/pkg/karmadactl/cmdinit/cert/cert.go index 0a5dbd526c18..af22874337fa 100644 --- a/pkg/karmadactl/cmdinit/cert/cert.go +++ b/pkg/karmadactl/cmdinit/cert/cert.go @@ -54,13 +54,20 @@ const ( // NewPrivateKey returns a new private key. var NewPrivateKey = GeneratePrivateKey -// GeneratePrivateKey Generate CA Private Key +// GeneratePrivateKey generates a certificate key. It supports both +// ECDSA (using the P-256 elliptic curve) and RSA algorithms. For RSA, +// the key is generated with a size of 3072 bits. If the keyType is +// x509.UnknownPublicKeyAlgorithm, the function defaults to generating +// an RSA key. func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error) { - if keyType == x509.ECDSA { + switch keyType { + case x509.ECDSA: return ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case x509.RSA, x509.UnknownPublicKeyAlgorithm: + return rsa.GenerateKey(rand.Reader, rsaKeySize) + default: + return nil, fmt.Errorf("unsupported key type: %T, supported key types are RSA and ECDSA", keyType) } - - return rsa.GenerateKey(rand.Reader, rsaKeySize) } // CertsConfig is a wrapper around certutil.Config extending it with PublicKeyAlgorithm. diff --git a/pkg/karmadactl/cmdinit/cmdinit.go b/pkg/karmadactl/cmdinit/cmdinit.go index b56988f60471..24e1eb567cec 100644 --- a/pkg/karmadactl/cmdinit/cmdinit.go +++ b/pkg/karmadactl/cmdinit/cmdinit.go @@ -76,7 +76,10 @@ var ( %[1]s init --karmada-apiserver-replicas 3 --etcd-replicas 3 --etcd-storage-mode PVC --storage-classes-name {StorageClassesName} # Specify external IPs(load balancer or HA IP) which used to sign the certificate - %[1]s init --cert-external-ip 10.235.1.2 --cert-external-dns www.karmada.io`) + %[1]s init --cert-external-ip 10.235.1.2 --cert-external-dns www.karmada.io + + # Install Karmada using a configuration file + %[1]s init --config /path/to/your/config/file.yaml`) ) // NewCmdInit install Karmada on Kubernetes @@ -120,7 +123,7 @@ func NewCmdInit(parentCommand string) *cobra.Command { // kube image registry flags.StringVarP(&opts.KubeImageMirrorCountry, "kube-image-mirror-country", "", "", "Country code of the kube image registry to be used. For Chinese mainland users, set it to cn") flags.StringVarP(&opts.KubeImageRegistry, "kube-image-registry", "", "", "Kube image registry. 
For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers to override default kube image registry")
-	flags.StringVar(&opts.KubeImageTag, "kube-image-tag", "v1.29.6", "Choose a specific Kubernetes version for the control plane.")
+	flags.StringVar(&opts.KubeImageTag, "kube-image-tag", "v1.30.4", "Choose a specific Kubernetes version for the control plane.")
 	// cert
 	flags.StringVar(&opts.ExternalIP, "cert-external-ip", "", "the external IP of Karmada certificate (e.g 192.168.1.2,172.16.1.2)")
 	flags.StringVar(&opts.ExternalDNS, "cert-external-dns", "", "the external DNS of Karmada certificate (e.g localhost,localhost.com)")
@@ -140,7 +143,7 @@ func NewCmdInit(parentCommand string) *cobra.Command {
 	flags.StringVarP(&opts.EtcdInitImage, "etcd-init-image", "", kubernetes.DefaultInitImage, "etcd init container image")
 	flags.Int32VarP(&opts.EtcdReplicas, "etcd-replicas", "", 1, "etcd replica set, cluster 3,5...singular")
 	flags.StringVarP(&opts.EtcdHostDataPath, "etcd-data", "", "/var/lib/karmada-etcd", "etcd data path,valid in hostPath mode.")
-	flags.StringVarP(&opts.EtcdNodeSelectorLabels, "etcd-node-selector-labels", "", "", "etcd pod select the labels of the node. valid in hostPath mode ( e.g. --etcd-node-selector-labels karmada.io/etcd=true)")
+	flags.StringVarP(&opts.EtcdNodeSelectorLabels, "etcd-node-selector-labels", "", "", "the labels used by the etcd pod to select nodes, valid in hostPath mode, with multiple labels separated by commas (e.g. --etcd-node-selector-labels karmada.io/etcd=true,kubernetes.io/os=linux)")
 	flags.StringVarP(&opts.EtcdPersistentVolumeSize, "etcd-pvc-size", "", "5Gi", "etcd data path,valid in pvc mode.")
 	flags.StringVar(&opts.ExternalEtcdCACertPath, "external-etcd-ca-cert-path", "", "The path of CA certificate of the external etcd cluster in pem format.")
 	flags.StringVar(&opts.ExternalEtcdClientCertPath, "external-etcd-client-cert-path", "", "The path of client side certificate to the external etcd cluster in pem format.")
@@ -149,6 +152,7 @@ func NewCmdInit(parentCommand string) *cobra.Command {
 	flags.StringVar(&opts.ExternalEtcdKeyPrefix, "external-etcd-key-prefix", "", "The key prefix to be configured to kube-apiserver through --etcd-prefix.")
 	// karmada
 	flags.StringVar(&opts.CRDs, "crds", kubernetes.DefaultCrdURL, "Karmada crds resource.(local file e.g. --crds /root/crds.tar.gz)")
+	flags.StringVar(&opts.KarmadaInitFilePath, "config", "", "path to the Karmada init configuration file")
 	flags.StringVarP(&opts.KarmadaAPIServerAdvertiseAddress, "karmada-apiserver-advertise-address", "", "", "The IP address the Karmada API Server will advertise it's listening on. If not set, the address on the master node will be used.")
 	flags.Int32VarP(&opts.KarmadaAPIServerNodePort, "port", "p", 32443, "Karmada apiserver service node port")
 	flags.StringVarP(&opts.KarmadaDataPath, "karmada-data", "d", "/etc/karmada", "Karmada data path. kubeconfig cert and crds files")
@@ -166,6 +170,7 @@ func NewCmdInit(parentCommand string) *cobra.Command {
 	flags.StringVarP(&opts.KarmadaAggregatedAPIServerImage, "karmada-aggregated-apiserver-image", "", kubernetes.DefaultKarmadaAggregatedAPIServerImage, "Karmada aggregated apiserver image")
 	flags.Int32VarP(&opts.KarmadaAggregatedAPIServerReplicas, "karmada-aggregated-apiserver-replicas", "", 1, "Karmada aggregated apiserver replica set")
 	flags.IntVarP(&opts.WaitComponentReadyTimeout, "wait-component-ready-timeout", "", cmdinitoptions.WaitComponentReadyTimeout, "Wait for karmada component ready timeout.
0 means wait forever") + return cmd } diff --git a/pkg/karmadactl/cmdinit/config/config.go b/pkg/karmadactl/cmdinit/config/config.go new file mode 100644 index 000000000000..00dac60a9391 --- /dev/null +++ b/pkg/karmadactl/cmdinit/config/config.go @@ -0,0 +1,106 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "fmt" + "os" + "sort" + + "k8s.io/apimachinery/pkg/runtime/schema" + yamlserializer "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog/v2" +) + +// LoadInitConfiguration loads the InitConfiguration from the specified file path. +// It delegates the actual loading to the loadInitConfigurationFromFile function. +func LoadInitConfiguration(cfgPath string) (*KarmadaInitConfig, error) { + var config *KarmadaInitConfig + var err error + + config, err = loadInitConfigurationFromFile(cfgPath) + + return config, err +} + +// loadInitConfigurationFromFile reads the file at the specified path and converts it into an InitConfiguration. +// It reads the file contents and then converts the bytes to an InitConfiguration. +func loadInitConfigurationFromFile(cfgPath string) (*KarmadaInitConfig, error) { + klog.V(1).Infof("loading configuration from %q", cfgPath) + + b, err := os.ReadFile(cfgPath) + if err != nil { + return nil, fmt.Errorf("unable to read config from %q: %v", cfgPath, err) + } + gvkmap, err := ParseGVKYamlMap(b) + if err != nil { + return nil, err + } + + return documentMapToInitConfiguration(gvkmap) +} + +// ParseGVKYamlMap parses a single YAML document into a map of GroupVersionKind to byte slices. +// This function is a simplified version that handles only a single YAML document. +func ParseGVKYamlMap(yamlBytes []byte) (map[schema.GroupVersionKind][]byte, error) { + gvkmap := make(map[schema.GroupVersionKind][]byte) + + gvk, err := yamlserializer.DefaultMetaFactory.Interpret(yamlBytes) + if err != nil { + return nil, fmt.Errorf("failed to interpret YAML document: %w", err) + } + if len(gvk.Group) == 0 || len(gvk.Version) == 0 || len(gvk.Kind) == 0 { + return nil, fmt.Errorf("invalid configuration for GroupVersionKind %+v: kind and apiVersion is mandatory information that must be specified", gvk) + } + gvkmap[*gvk] = yamlBytes + + return gvkmap, nil +} + +// documentMapToInitConfiguration processes a map of GroupVersionKind to byte slices to extract the InitConfiguration. +// It iterates over the map, checking for the "InitConfiguration" kind, group, and version, and unmarshals its content into an InitConfiguration object. 
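+// GVKs are visited in sorted order so decoding stays deterministic if the map
+// ever contains multiple documents; kinds other than KarmadaInitConfig are ignored.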
+func documentMapToInitConfiguration(gvkmap map[schema.GroupVersionKind][]byte) (*KarmadaInitConfig, error) { + var initcfg *KarmadaInitConfig + + gvks := make([]schema.GroupVersionKind, 0, len(gvkmap)) + for gvk := range gvkmap { + gvks = append(gvks, gvk) + } + sort.Slice(gvks, func(i, j int) bool { + return gvks[i].String() < gvks[j].String() + }) + + for _, gvk := range gvks { + fileContent := gvkmap[gvk] + if gvk.Kind == "KarmadaInitConfig" { + if gvk.Group != GroupName || gvk.Version != SchemeGroupVersion.Version { + return nil, fmt.Errorf("invalid Group or Version: expected group %q and version %q, but got group %q and version %q", GroupName, SchemeGroupVersion.Version, gvk.Group, gvk.Version) + } + initcfg = &KarmadaInitConfig{} + if err := yaml.Unmarshal(fileContent, initcfg); err != nil { + return nil, err + } + } + } + + if initcfg == nil { + return nil, fmt.Errorf("no KarmadaInitConfig kind was found in the YAML file") + } + + return initcfg, nil +} diff --git a/pkg/karmadactl/cmdinit/config/config_test.go b/pkg/karmadactl/cmdinit/config/config_test.go new file mode 100644 index 000000000000..61836900427d --- /dev/null +++ b/pkg/karmadactl/cmdinit/config/config_test.go @@ -0,0 +1,421 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const testConfig = ` +apiVersion: config.karmada.io/v1alpha1 +kind: KarmadaInitConfig +spec: + certificates: + caCertFile: "/etc/karmada/pki/ca.crt" + caKeyFile: "/etc/karmada/pki/ca.key" + externalDNS: + - "localhost" + - "example.com" + externalIP: + - "192.168.1.2" + - "172.16.1.2" + validityPeriod: "8760h0m0s" + etcd: + local: + Repository: "registry.k8s.io/etcd" + Tag: "latest" + dataPath: "/var/lib/karmada-etcd" + initImage: + repository: "alpine" + tag: "3.19.1" + nodeSelectorLabels: + karmada.io/etcd: "true" + pvcSize: "5Gi" + replicas: 3 + storageClassesName: "fast" + storageMode: "PVC" + external: + endpoints: + - "https://example.com:8443" + caFile: "/path/to/your/ca.crt" + certFile: "/path/to/your/cert.crt" + keyFile: "/path/to/your/key.key" + keyPrefix: "ext-" + hostCluster: + apiEndpoint: "https://kubernetes.example.com" + kubeconfig: "/root/.kube/config" + context: "karmada-host" + domain: "cluster.local" + images: + imagePullPolicy: "IfNotPresent" + imagePullSecrets: + - "PullSecret1" + - "PullSecret2" + kubeImageMirrorCountry: "cn" + kubeImageRegistry: "registry.cn-hangzhou.aliyuncs.com/google_containers" + kubeImageTag: "v1.29.6" + privateRegistry: + registry: "my.private.registry" + components: + karmadaAPIServer: + repository: "karmada/kube-apiserver" + tag: "v1.29.6" + replicas: 1 + advertiseAddress: "192.168.1.100" + serviceType: "NodePort" + networking: + namespace: "karmada-system" + port: 32443 + karmadaAggregatedAPIServer: + repository: "karmada/karmada-aggregated-apiserver" + tag: "v0.0.0-master" + replicas: 1 + kubeControllerManager: + repository: "karmada/kube-controller-manager" + tag: "v1.29.6" + replicas: 1 + karmadaControllerManager: + repository: "karmada/karmada-controller-manager" + tag: "v0.0.0-master" + replicas: 1 + karmadaScheduler: + repository: "karmada/karmada-scheduler" + tag: "v0.0.0-master" + replicas: 1 + karmadaWebhook: + repository: "karmada/karmada-webhook" + tag: "v0.0.0-master" + replicas: 1 + karmadaDataPath: "/etc/karmada" + karmadaPKIPath: "/etc/karmada/pki" + karmadaCRDs: "https://github.com/karmada-io/karmada/releases/download/test/crds.tar.gz" + waitComponentReadyTimeout: 120 +` + +const invalidTestConfig = ` +apiVersion: v1alpha1 +kind: KarmadaInitConfig +metadata: + name: karmada-init +spec: + waitComponentReadyTimeout: "invalid-int" +` + +func TestLoadInitConfiguration(t *testing.T) { + expectedConfig := &KarmadaInitConfig{ + TypeMeta: metav1.TypeMeta{ + Kind: "KarmadaInitConfig", + APIVersion: "config.karmada.io/v1alpha1", + }, + Spec: KarmadaInitSpec{ + WaitComponentReadyTimeout: 120, + KarmadaDataPath: "/etc/karmada", + KarmadaPKIPath: "/etc/karmada/pki", + KarmadaCRDs: "https://github.com/karmada-io/karmada/releases/download/test/crds.tar.gz", + Certificates: Certificates{ + CACertFile: "/etc/karmada/pki/ca.crt", + CAKeyFile: "/etc/karmada/pki/ca.key", + ExternalDNS: []string{ + "localhost", + "example.com", + }, + ExternalIP: []string{ + "192.168.1.2", + "172.16.1.2", + }, + ValidityPeriod: metav1.Duration{Duration: parseDuration("8760h")}, + }, + Etcd: Etcd{ + Local: &LocalEtcd{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "registry.k8s.io/etcd", + Tag: "latest", + }, + Replicas: 3, + }, + InitImage: Image{ + Repository: "alpine", + Tag: "3.19.1", + }, + DataPath: 
"/var/lib/karmada-etcd", + PVCSize: "5Gi", + NodeSelectorLabels: map[string]string{ + "karmada.io/etcd": "true", + }, + StorageClassesName: "fast", + StorageMode: "PVC", + }, + External: &ExternalEtcd{ + Endpoints: []string{ + "https://example.com:8443", + }, + CAFile: "/path/to/your/ca.crt", + CertFile: "/path/to/your/cert.crt", + KeyFile: "/path/to/your/key.key", + KeyPrefix: "ext-", + }, + }, + HostCluster: HostCluster{ + APIEndpoint: "https://kubernetes.example.com", + Kubeconfig: "/root/.kube/config", + Context: "karmada-host", + Domain: "cluster.local", + }, + Images: Images{ + ImagePullPolicy: corev1.PullIfNotPresent, + ImagePullSecrets: []string{"PullSecret1", "PullSecret2"}, + KubeImageMirrorCountry: "cn", + KubeImageRegistry: "registry.cn-hangzhou.aliyuncs.com/google_containers", + KubeImageTag: "v1.29.6", + PrivateRegistry: &ImageRegistry{ + Registry: "my.private.registry", + }, + }, + Components: KarmadaComponents{ + KarmadaAPIServer: &KarmadaAPIServer{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/kube-apiserver", + Tag: "v1.29.6", + }, + Replicas: 1, + }, + AdvertiseAddress: "192.168.1.100", + Networking: Networking{ + Namespace: "karmada-system", + Port: 32443, + }, + }, + KarmadaAggregatedAPIServer: &KarmadaAggregatedAPIServer{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/karmada-aggregated-apiserver", + Tag: "v0.0.0-master", + }, + Replicas: 1, + }, + }, + KubeControllerManager: &KubeControllerManager{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/kube-controller-manager", + Tag: "v1.29.6", + }, + Replicas: 1, + }, + }, + KarmadaControllerManager: &KarmadaControllerManager{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/karmada-controller-manager", + Tag: "v0.0.0-master", + }, + Replicas: 1, + }, + }, + KarmadaScheduler: &KarmadaScheduler{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/karmada-scheduler", + Tag: "v0.0.0-master", + }, + Replicas: 1, + }, + }, + KarmadaWebhook: &KarmadaWebhook{ + CommonSettings: CommonSettings{ + Image: Image{ + Repository: "karmada/karmada-webhook", + Tag: "v0.0.0-master", + }, + Replicas: 1, + }, + }, + }, + }, + } + + t.Run("Test Load Valid Configuration", func(t *testing.T) { + tmpFile, err := os.CreateTemp("", "test-config-*.yaml") + assert.NoError(t, err) + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.Write([]byte(testConfig)) + assert.NoError(t, err) + err = tmpFile.Close() + assert.NoError(t, err) + + config, err := LoadInitConfiguration(tmpFile.Name()) + assert.NoError(t, err) + assert.Equal(t, expectedConfig, config) + }) + + t.Run("Test Load Invalid Configuration", func(t *testing.T) { + tmpFile, err := os.CreateTemp("", "invalid-config-*.yaml") + assert.NoError(t, err) + defer os.Remove(tmpFile.Name()) + + _, err = tmpFile.Write([]byte(invalidTestConfig)) + assert.NoError(t, err) + err = tmpFile.Close() + assert.NoError(t, err) + + _, err = LoadInitConfiguration(tmpFile.Name()) + assert.Error(t, err) + }) + + t.Run("Test Load Non-Existent Configuration", func(t *testing.T) { + _, err := LoadInitConfiguration("non-existent-file.yaml") + assert.Error(t, err) + }) +} + +func TestParseGVKYamlMap(t *testing.T) { + t.Run("Test Parse Valid GVK Yaml", func(t *testing.T) { + gvkmap, err := ParseGVKYamlMap([]byte(testConfig)) + assert.NoError(t, err) + assert.NotEmpty(t, gvkmap) + + // Check if the GVK is correct + for gvk := range gvkmap { + assert.Equal(t, "config.karmada.io", gvk.Group) + 
assert.Equal(t, "v1alpha1", gvk.Version) + assert.Equal(t, "KarmadaInitConfig", gvk.Kind) + } + }) + + t.Run("Test Parse Invalid GVK Yaml - Incorrect Group/Version/Kind", func(t *testing.T) { + invalidGVKConfig := ` +apiVersion: invalid.group/v1beta1 +kind: InvalidKind +metadata: + name: invalid-config +spec: + key: value +` + gvkmap, err := ParseGVKYamlMap([]byte(invalidGVKConfig)) + assert.NoError(t, err, "Expected error due to invalid Group/Version/Kind") + + for gvk := range gvkmap { + assert.Equal(t, "invalid.group", gvk.Group) + assert.Equal(t, "v1beta1", gvk.Version) + assert.Equal(t, "InvalidKind", gvk.Kind) + } + }) + + t.Run("Test Parse Invalid Yaml - Bad Formatting", func(t *testing.T) { + // This YAML has invalid formatting (bad indentation) + invalidFormattedYAML := ` +apiVersion: config.karmada.io/v1alpha1 +kind: KarmadaInitConfig +metadata: + name: invalid-format +spec: + certificates: + caCertFile: /etc/karmada/pki/ca.crt + caKeyFile: /etc/karmada/pki/ca.key + externalDNS + - "localhost" +` + _, err := ParseGVKYamlMap([]byte(invalidFormattedYAML)) + assert.Error(t, err, "Expected error due to incorrect YAML formatting") + }) + + t.Run("Test Parse Empty Yaml", func(t *testing.T) { + _, err := ParseGVKYamlMap([]byte{}) + assert.Error(t, err, "Expected error due to empty YAML") + }) +} + +func TestDocumentMapToInitConfiguration(t *testing.T) { + t.Run("Test Valid GVK Map to InitConfiguration", func(t *testing.T) { + gvkmap, err := ParseGVKYamlMap([]byte(testConfig)) + assert.NoError(t, err) + + config, err := documentMapToInitConfiguration(gvkmap) + assert.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "KarmadaInitConfig", config.Kind) + }) + + t.Run("Test Invalid GVK Map with Missing Kind", func(t *testing.T) { + // Create a GVK map with an invalid Kind + invalidGVK := map[schema.GroupVersionKind][]byte{ + {Group: "config.karmada.io", Version: "v1alpha1", Kind: "InvalidKind"}: []byte(testConfig), + } + + _, err := documentMapToInitConfiguration(invalidGVK) + assert.Error(t, err, "Expected error due to missing KarmadaInitConfig kind") + }) + + t.Run("Test Invalid GVK with Wrong Group and Version", func(t *testing.T) { + invalidGVKConfig := ` +apiVersion: wrong.group/v0alpha1 +kind: KarmadaInitConfig +metadata: + name: invalid-config +` + gvkmap, err := ParseGVKYamlMap([]byte(invalidGVKConfig)) + assert.NoError(t, err) + + _, err = documentMapToInitConfiguration(gvkmap) + assert.Error(t, err, "Expected error due to incorrect Group or Version") + }) + + t.Run("Test Multiple GVKs with Only One KarmadaInitConfig", func(t *testing.T) { + multiGVKConfig := ` +apiVersion: config.karmada.io/v1alpha1 +kind: KarmadaInitConfig +metadata: + name: valid-config +--- +apiVersion: other.group/v1beta1 +kind: OtherConfig +metadata: + name: other-config +` + gvkmap, err := ParseGVKYamlMap([]byte(multiGVKConfig)) + assert.NoError(t, err) + + config, err := documentMapToInitConfiguration(gvkmap) + assert.NoError(t, err) + assert.NotNil(t, config) + assert.Equal(t, "KarmadaInitConfig", config.Kind) + + // Ensure the other config is ignored + assert.Len(t, gvkmap, 1, fmt.Sprintf("Expect only 1 GVKs in the map, but got %d", len(gvkmap))) + }) +} + +// parseDuration parses a duration string and returns the corresponding time.Duration value. +// If the parsing fails, it returns a duration of 0. 
+func parseDuration(durationStr string) time.Duration { + duration, err := time.ParseDuration(durationStr) + if err != nil { + return 0 + } + return duration +} diff --git a/pkg/karmadactl/cmdinit/config/types.go b/pkg/karmadactl/cmdinit/config/types.go new file mode 100644 index 000000000000..0ac760d3231c --- /dev/null +++ b/pkg/karmadactl/cmdinit/config/types.go @@ -0,0 +1,353 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "config.karmada.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// KarmadaInitConfig defines the configuration for initializing Karmada +type KarmadaInitConfig struct { + metav1.TypeMeta `json:",inline" yaml:",inline"` + + // Spec defines the desired state for initializing Karmada + // +optional + Spec KarmadaInitSpec `json:"spec,omitempty" yaml:"spec,omitempty"` +} + +// KarmadaInitSpec is the specification part of KarmadaInitConfig, containing all configurable options +type KarmadaInitSpec struct { + // Certificates configures the certificate information required by Karmada + // +optional + Certificates Certificates `json:"certificates,omitempty" yaml:"certificates,omitempty"` + + // Etcd configures the information of the Etcd cluster + // +optional + Etcd Etcd `json:"etcd,omitempty" yaml:"etcd,omitempty"` + + // HostCluster configures the information of the host cluster + // +optional + HostCluster HostCluster `json:"hostCluster,omitempty" yaml:"hostCluster,omitempty"` + + // Images configures image-related information + // +optional + Images Images `json:"images,omitempty" yaml:"images,omitempty"` + + // Components configures information about Karmada components + // +optional + Components KarmadaComponents `json:"components,omitempty" yaml:"components,omitempty"` + + // KarmadaCRDs configures the Karmada CRDs to be installed + // +optional + KarmadaCRDs string `json:"karmadaCRDs,omitempty" yaml:"karmadaCRDs,omitempty"` + + // KarmadaDataPath configures the data directory for Karmada + // +optional + KarmadaDataPath string `json:"karmadaDataPath,omitempty" yaml:"karmadaDataPath,omitempty"` + + // KarmadaPKIPath configures the PKI directory for Karmada + // +optional + KarmadaPKIPath string `json:"karmadaPKIPath,omitempty" yaml:"karmadaPKIPath,omitempty"` + + // WaitComponentReadyTimeout configures the timeout (in seconds) for waiting for components to be ready + // +optional + WaitComponentReadyTimeout int `json:"waitComponentReadyTimeout,omitempty" yaml:"waitComponentReadyTimeout,omitempty"` +} + +// Certificates defines the configuration related to certificates +type Certificates struct { + // CACertFile is the path to the root CA certificate file + // +optional + CACertFile string 
`json:"caCertFile,omitempty" yaml:"caCertFile,omitempty"` + + // CAKeyFile is the path to the root CA key file + // +optional + CAKeyFile string `json:"caKeyFile,omitempty" yaml:"caKeyFile,omitempty"` + + // ExternalDNS is the list of external DNS names for the certificate + // +optional + ExternalDNS []string `json:"externalDNS,omitempty" yaml:"externalDNS,omitempty"` + + // ExternalIP is the list of external IPs for the certificate + // +optional + ExternalIP []string `json:"externalIP,omitempty" yaml:"externalIP,omitempty"` + + // ValidityPeriod is the validity period of the certificate + // +optional + ValidityPeriod metav1.Duration `json:"validityPeriod,omitempty" yaml:"validityPeriod,omitempty"` +} + +// Etcd defines the configuration of the Etcd cluster +type Etcd struct { + // Local indicates using a local Etcd cluster + // +optional + Local *LocalEtcd `json:"local,omitempty" yaml:"local,omitempty"` + + // External indicates using an external Etcd cluster + // +optional + External *ExternalEtcd `json:"external,omitempty" yaml:"external,omitempty"` +} + +// LocalEtcd defines the configuration of a local Etcd cluster +type LocalEtcd struct { + // CommonSettings contains common settings like image and resources + CommonSettings `json:",inline" yaml:",inline"` + + // DataPath is the data storage path for Etcd + // +optional + DataPath string `json:"dataPath,omitempty" yaml:"dataPath,omitempty"` + + // InitImage is the image for the Etcd init container + // +optional + InitImage Image `json:"initImage,omitempty" yaml:"initImage,omitempty"` + + // NodeSelectorLabels are the node selector labels for the Etcd pods + // +optional + NodeSelectorLabels map[string]string `json:"nodeSelectorLabels,omitempty" yaml:"nodeSelectorLabels,omitempty"` + + // PVCSize is the size of the PersistentVolumeClaim for Etcd + // +optional + PVCSize string `json:"pvcSize,omitempty" yaml:"pvcSize,omitempty"` + + // StorageMode is the storage mode for Etcd (e.g., emptyDir, hostPath, PVC) + // +optional + StorageMode string `json:"storageMode,omitempty" yaml:"storageMode,omitempty"` + + // StorageClassesName is the name of the storage class for the Etcd PVC + // +optional + StorageClassesName string `json:"storageClassesName,omitempty" yaml:"storageClassesName,omitempty"` +} + +// ExternalEtcd defines the configuration of an external Etcd cluster +type ExternalEtcd struct { + // Endpoints are the server addresses of the external Etcd cluster + // +required + Endpoints []string `json:"endpoints" yaml:"endpoints"` + + // CAFile is the path to the CA certificate for the external Etcd cluster + // +optional + CAFile string `json:"caFile,omitempty" yaml:"caFile,omitempty"` + + // CertFile is the path to the client certificate for the external Etcd cluster + // +optional + CertFile string `json:"certFile,omitempty" yaml:"certFile,omitempty"` + + // KeyFile is the path to the client key for the external Etcd cluster + // +optional + KeyFile string `json:"keyFile,omitempty" yaml:"keyFile,omitempty"` + + // KeyPrefix is the key prefix used in the external Etcd cluster + // +optional + KeyPrefix string `json:"keyPrefix,omitempty" yaml:"keyPrefix,omitempty"` +} + +// HostCluster defines the configuration of the host cluster +type HostCluster struct { + // APIEndpoint is the API server address of the host cluster + // +optional + APIEndpoint string `json:"apiEndpoint,omitempty" yaml:"apiEndpoint,omitempty"` + + // Kubeconfig is the path to the kubeconfig file for the host cluster + // +optional + Kubeconfig string 
`json:"kubeconfig,omitempty" yaml:"kubeconfig,omitempty"` + + // Context is the context name in the kubeconfig for the host cluster + // +optional + Context string `json:"context,omitempty" yaml:"context,omitempty"` + + // Domain is the domain name of the host cluster + // +optional + Domain string `json:"domain,omitempty" yaml:"domain,omitempty"` + + // SecretRef refers to the credentials needed to access the host cluster + // +optional + SecretRef *LocalSecretReference `json:"secretRef,omitempty" yaml:"secretRef,omitempty"` +} + +// Images defines the configuration related to images +type Images struct { + // ImagePullPolicy is the pull policy for images + // +optional + ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty" yaml:"imagePullPolicy,omitempty"` + + // ImagePullSecrets are the secrets used for pulling images + // +optional + ImagePullSecrets []string `json:"imagePullSecrets,omitempty" yaml:"imagePullSecrets,omitempty"` + + // KubeImageMirrorCountry is the country code for the Kubernetes image mirror + // +optional + KubeImageMirrorCountry string `json:"kubeImageMirrorCountry,omitempty" yaml:"kubeImageMirrorCountry,omitempty"` + + // KubeImageRegistry is the registry for Kubernetes images + // +optional + KubeImageRegistry string `json:"kubeImageRegistry,omitempty" yaml:"kubeImageRegistry,omitempty"` + + // KubeImageTag is the tag for Kubernetes images + // +optional + KubeImageTag string `json:"kubeImageTag,omitempty" yaml:"kubeImageTag,omitempty"` + + // PrivateRegistry is the private image registry + // +optional + PrivateRegistry *ImageRegistry `json:"privateRegistry,omitempty" yaml:"privateRegistry,omitempty"` +} + +// KarmadaComponents defines the configuration for all Karmada components +type KarmadaComponents struct { + // KarmadaAPIServer is the configuration for the Karmada API Server + // +optional + KarmadaAPIServer *KarmadaAPIServer `json:"karmadaAPIServer,omitempty" yaml:"karmadaAPIServer,omitempty"` + + // KarmadaAggregatedAPIServer is the configuration for the Karmada Aggregated API Server + // +optional + KarmadaAggregatedAPIServer *KarmadaAggregatedAPIServer `json:"karmadaAggregatedAPIServer,omitempty" yaml:"karmadaAggregatedAPIServer,omitempty"` + + // KubeControllerManager is the configuration for the Kube Controller Manager + // +optional + KubeControllerManager *KubeControllerManager `json:"kubeControllerManager,omitempty" yaml:"kubeControllerManager,omitempty"` + + // KarmadaControllerManager is the configuration for the Karmada Controller Manager + // +optional + KarmadaControllerManager *KarmadaControllerManager `json:"karmadaControllerManager,omitempty" yaml:"karmadaControllerManager,omitempty"` + + // KarmadaScheduler is the configuration for the Karmada Scheduler + // +optional + KarmadaScheduler *KarmadaScheduler `json:"karmadaScheduler,omitempty" yaml:"karmadaScheduler,omitempty"` + + // KarmadaWebhook is the configuration for the Karmada Webhook + // +optional + KarmadaWebhook *KarmadaWebhook `json:"karmadaWebhook,omitempty" yaml:"karmadaWebhook,omitempty"` +} + +// Networking defines network-related configuration +type Networking struct { + // Namespace is the Kubernetes namespace where Karmada is deployed + // +optional + Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` + + // Port is the port number for the Karmada API Server + // +optional + Port int32 `json:"port,omitempty" yaml:"port,omitempty"` +} + +// CommonSettings defines common settings for components +type CommonSettings struct { + // Image 
specifies the image to use for the component + Image `json:",inline" yaml:",inline"` + + // Replicas is the number of replicas for the component + // +optional + Replicas int32 `json:"replicas,omitempty" yaml:"replicas,omitempty"` + + // Resources defines resource requests and limits for the component + // +optional + Resources corev1.ResourceRequirements `json:"resources,omitempty" yaml:"resources,omitempty"` + + // NodeSelector defines node selection constraints + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" yaml:"nodeSelector,omitempty"` + + // Tolerations define pod tolerations + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty" yaml:"tolerations,omitempty"` + + // Affinity defines pod affinity rules + // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty" yaml:"affinity,omitempty"` +} + +// Image defines image information +type Image struct { + // Repository is the repository for the image + // +optional + Repository string `json:"repository,omitempty" yaml:"repository,omitempty"` + + // Tag is the tag for the image + // +optional + Tag string `json:"tag,omitempty" yaml:"tag,omitempty"` +} + +// KarmadaAPIServer defines the configuration for the Karmada API Server +type KarmadaAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` + + // AdvertiseAddress is the address advertised by the API server + // +optional + AdvertiseAddress string `json:"advertiseAddress,omitempty" yaml:"advertiseAddress,omitempty"` + + // Networking configures network-related information + // +optional + Networking Networking `json:"networking,omitempty" yaml:"networking,omitempty"` + + // ServiceAnnotations are annotations added to the API server service + // +optional + ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty" yaml:"serviceAnnotations,omitempty"` +} + +// KarmadaAggregatedAPIServer defines the configuration for the Karmada Aggregated API Server +type KarmadaAggregatedAPIServer struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KubeControllerManager defines the configuration for the Kube Controller Manager +type KubeControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaControllerManager defines the configuration for the Karmada Controller Manager +type KarmadaControllerManager struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaScheduler defines the configuration for the Karmada Scheduler +type KarmadaScheduler struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// KarmadaWebhook defines the configuration for the Karmada Webhook +type KarmadaWebhook struct { + CommonSettings `json:",inline" yaml:",inline"` +} + +// LocalSecretReference is a reference to a secret within the same namespace +type LocalSecretReference struct { + // Name is the name of the referenced secret + Name string `json:"name,omitempty" yaml:"name,omitempty"` +} + +// ImageRegistry represents an image registry +type ImageRegistry struct { + // Registry is the hostname of the image registry + // +required + Registry string `json:"registry" yaml:"registry"` +} + +// GetImage generates the full image string in the format "Repository:Tag" +// by combining the image repository and tag fields. 
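+// For example (illustrative values), Image{Repository: "docker.io/karmada/karmada-apiserver",
+// Tag: "v1.11.0"} yields "docker.io/karmada/karmada-apiserver:v1.11.0", while an Image with
+// an empty Repository or Tag yields "".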
+func (i *Image) GetImage() string { + if i.Tag == "" || i.Repository == "" { + return "" + } + return i.Repository + ":" + i.Tag +} diff --git a/pkg/karmadactl/cmdinit/karmada/check.go b/pkg/karmadactl/cmdinit/karmada/check.go index 8d2690c1b866..1d068c3c2a30 100644 --- a/pkg/karmadactl/cmdinit/karmada/check.go +++ b/pkg/karmadactl/cmdinit/karmada/check.go @@ -29,7 +29,7 @@ import ( ) // WaitAPIServiceReady wait the api service condition true -func WaitAPIServiceReady(c *aggregator.Clientset, name string, timeout time.Duration) error { +func WaitAPIServiceReady(c aggregator.Interface, name string, timeout time.Duration) error { if err := wait.PollUntilContextTimeout(context.TODO(), time.Second, timeout, true, func(ctx context.Context) (done bool, err error) { apiService, e := c.ApiregistrationV1().APIServices().Get(ctx, name, metav1.GetOptions{}) if e != nil { diff --git a/pkg/karmadactl/cmdinit/karmada/check_test.go b/pkg/karmadactl/cmdinit/karmada/check_test.go new file mode 100644 index 000000000000..2d24add29cbe --- /dev/null +++ b/pkg/karmadactl/cmdinit/karmada/check_test.go @@ -0,0 +1,145 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package karmada + +import ( + "context" + "fmt" + "strings" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + fakeAggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake" +) + +func TestWaitAPIServiceReady(t *testing.T) { + aaAPIServiceName := "karmada-search" + tests := []struct { + name string + aaAPIServiceName string + client aggregator.Interface + timeout time.Duration + prep func(aggregator.Interface) error + wantErr bool + errMsg string + }{ + { + name: "WaitAPIServiceReady_AAAPIServiceDoesNotExist_Timeout", + aaAPIServiceName: aaAPIServiceName, + client: fakeAggregator.NewSimpleClientset(), + timeout: time.Millisecond * 50, + prep: func(aggregator.Interface) error { return nil }, + wantErr: true, + errMsg: "context deadline exceeded", + }, + { + name: "WaitAPIServiceReady_AAAPIServiceIsNotReady_Timeout", + aaAPIServiceName: aaAPIServiceName, + client: fakeAggregator.NewSimpleClientset(), + timeout: time.Millisecond * 100, + prep: func(client aggregator.Interface) error { + if _, err := createAAAPIService(client, aaAPIServiceName); err != nil { + return fmt.Errorf("failed to create %s aaAPIService, got: %v", aaAPIServiceName, err) + } + return nil + }, + wantErr: true, + errMsg: "context deadline exceeded", + }, + { + name: "WaitAPIServiceReady_AAAPIServiceIsReady_ItIsNowReadyToUse", + aaAPIServiceName: aaAPIServiceName, + client: fakeAggregator.NewSimpleClientset(), + timeout: time.Millisecond * 50, + prep: func(client aggregator.Interface) error { + if err := createAndMarkAAAPIServiceAvailable(client, aaAPIServiceName); err != nil { + return fmt.Errorf("failed to create and mark availability status of %s aaAPIService, 
got: %v", aaAPIServiceName, err) + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.client); err != nil { + t.Fatalf("failed to prep before waiting for API service to be ready, got: %v", err) + } + err := WaitAPIServiceReady(test.client, test.aaAPIServiceName, test.timeout) + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + }) + } +} + +// createAndMarkAAAPIServiceAvailable creates the specified AA APIService and then +// updates its condition status to "Available" by setting the condition status to "ConditionTrue". +// This function simplifies the combined process of creation and availability marking. +func createAndMarkAAAPIServiceAvailable(a aggregator.Interface, aaAPIServiceName string) error { + aaAPIServerCreated, err := createAAAPIService(a, aaAPIServiceName) + if err != nil { + return err + } + + return updateAAAPIServiceCondition( + aaAPIServerCreated, a, apiregistrationv1.Available, + apiregistrationv1.ConditionTrue, + ) +} + +// createAAAPIService creates a single APIService resource for the specified AA API +// using the provided aggregator client. It returns the created APIService object or an error +// if the creation fails. +func createAAAPIService(a aggregator.Interface, aaAPIServiceName string) (*apiregistrationv1.APIService, error) { + apiServiceCreated, err := a.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{ + ObjectMeta: metav1.ObjectMeta{ + Name: aaAPIServiceName, + }, + }, metav1.CreateOptions{}) + if err != nil { + return nil, fmt.Errorf("failed to create api service, got error: %v", err) + } + return apiServiceCreated, nil +} + +// updateAAAPIServiceCondition updates the specified condition type and status +// for the provided APIService resource using the aggregator client. +// This function sets conditions like "Available" on the APIService to reflect its current state. 
+func updateAAAPIServiceCondition(service *apiregistrationv1.APIService, a aggregator.Interface, + conditionType apiregistrationv1.APIServiceConditionType, conditionStatus apiregistrationv1.ConditionStatus) error { + service.Status.Conditions = []apiregistrationv1.APIServiceCondition{ + { + Type: conditionType, + Status: conditionStatus, + }, + } + _, err := a.ApiregistrationV1().APIServices().UpdateStatus(context.TODO(), service, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update status of apiservice, got error: %v", err) + } + return nil +} diff --git a/pkg/karmadactl/cmdinit/karmada/deploy.go b/pkg/karmadactl/cmdinit/karmada/deploy.go index 6c8a751884e6..7af7791d3601 100644 --- a/pkg/karmadactl/cmdinit/karmada/deploy.go +++ b/pkg/karmadactl/cmdinit/karmada/deploy.go @@ -221,7 +221,7 @@ func crdPatchesResources(filename, caBundle string) ([]byte, error) { } // createCRDs create crd resource -func createCRDs(crdClient *clientset.Clientset, filename string) error { +func createCRDs(crdClient clientset.Interface, filename string) error { obj := apiextensionsv1.CustomResourceDefinition{} data, err := os.ReadFile(filename) if err != nil { @@ -252,7 +252,7 @@ func createCRDs(crdClient *clientset.Clientset, filename string) error { } // patchCRDs patch crd resource -func patchCRDs(crdClient *clientset.Clientset, caBundle, filename string) error { +func patchCRDs(crdClient clientset.Interface, caBundle, filename string) error { data, err := crdPatchesResources(filename, caBundle) if err != nil { return err diff --git a/pkg/karmadactl/cmdinit/karmada/rbac.go b/pkg/karmadactl/cmdinit/karmada/rbac.go index e4d51252cc32..7ee4281fee83 100644 --- a/pkg/karmadactl/cmdinit/karmada/rbac.go +++ b/pkg/karmadactl/cmdinit/karmada/rbac.go @@ -73,7 +73,7 @@ func grantAccessPermissionToAgent(clientSet kubernetes.Interface) error { { APIGroups: []string{"cluster.karmada.io"}, Resources: []string{"clusters"}, - Verbs: []string{"create", "get", "list", "watch", "patch", "update"}, + Verbs: []string{"create", "get", "list", "watch", "patch", "update", "delete"}, }, { APIGroups: []string{"cluster.karmada.io"}, diff --git a/pkg/karmadactl/cmdinit/kubernetes/deploy.go b/pkg/karmadactl/cmdinit/kubernetes/deploy.go index a3b5842caf97..cad1a9fce8af 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/deploy.go +++ b/pkg/karmadactl/cmdinit/kubernetes/deploy.go @@ -36,6 +36,7 @@ import ( netutils "k8s.io/utils/net" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/cert" + initConfig "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/config" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/karmada" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/options" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/utils" @@ -134,6 +135,7 @@ type CommandInitOption struct { EtcdStorageMode string EtcdHostDataPath string EtcdNodeSelectorLabels string + EtcdNodeSelectorLabelsMap map[string]string EtcdPersistentVolumeSize string ExternalEtcdCACertPath string ExternalEtcdClientCertPath string @@ -173,6 +175,7 @@ type CommandInitOption struct { WaitComponentReadyTimeout int CaCertFile string CaKeyFile string + KarmadaInitFilePath string } func (i *CommandInitOption) validateLocalEtcd(parentCommand string) error { @@ -180,10 +183,6 @@ func (i *CommandInitOption) validateLocalEtcd(parentCommand string) error { return fmt.Errorf("when etcd storage mode is hostPath, dataPath is not empty. 
See '%s init --help'", parentCommand) } - if i.EtcdStorageMode == etcdStorageModeHostPath && i.EtcdNodeSelectorLabels != "" && utils.StringToMap(i.EtcdNodeSelectorLabels) == nil { - return fmt.Errorf("the label does not seem to be 'key=value'") - } - if i.EtcdStorageMode == etcdStorageModeHostPath && i.EtcdReplicas != 1 { return fmt.Errorf("for data security,when etcd storage mode is hostPath,etcd-replicas can only be 1") } @@ -222,6 +221,16 @@ func (i *CommandInitOption) isExternalEtcdProvided() bool { // Validate Check that there are enough flags to run the command. func (i *CommandInitOption) Validate(parentCommand string) error { + if i.KarmadaInitFilePath != "" { + cfg, err := initConfig.LoadInitConfiguration(i.KarmadaInitFilePath) + if err != nil { + return fmt.Errorf("failed to load karmada init configuration: %v", err) + } + if err := i.parseInitConfig(cfg); err != nil { + return fmt.Errorf("failed to parse karmada init configuration: %v", err) + } + } + if i.KarmadaAPIServerAdvertiseAddress != "" { if netutils.ParseIPSloppy(i.KarmadaAPIServerAdvertiseAddress) == nil { return fmt.Errorf("karmada apiserver advertise address is not valid") @@ -274,12 +283,18 @@ func (i *CommandInitOption) Complete() error { } klog.Infof("karmada apiserver ip: %s", i.KarmadaAPIServerIP) + if err := i.handleEtcdNodeSelectorLabels(); err != nil { + return err + } + if !i.isExternalEtcdProvided() && i.EtcdStorageMode == "hostPath" && i.EtcdNodeSelectorLabels != "" { - if !i.isNodeExist(i.EtcdNodeSelectorLabels) { - return fmt.Errorf("no node found by label %s", i.EtcdNodeSelectorLabels) + labels := strings.Split(i.EtcdNodeSelectorLabels, ",") + for _, label := range labels { + if !i.isNodeExist(label) { + return fmt.Errorf("no node found by label %s", label) + } } } - return initializeDirectory(i.KarmadaDataPath) } @@ -714,6 +729,21 @@ func (i *CommandInitOption) getImagePullSecrets() []corev1.LocalObjectReference return imagePullSecrets } +func (i *CommandInitOption) handleEtcdNodeSelectorLabels() error { + if i.EtcdStorageMode == etcdStorageModeHostPath && i.EtcdNodeSelectorLabels != "" { + selector, err := metav1.ParseToLabelSelector(i.EtcdNodeSelectorLabels) + if err != nil { + return fmt.Errorf("the etcdNodeSelector format is incorrect: %s", err) + } + labelMap, err := metav1.LabelSelectorAsMap(selector) + if err != nil { + return fmt.Errorf("failed to convert etcdNodeSelector labels to map: %v", err) + } + i.EtcdNodeSelectorLabelsMap = labelMap + } + return nil +} + func generateServerURL(serverIP string, nodePort int32) (string, error) { _, ipType, err := utils.ParseIP(serverIP) if err != nil { @@ -729,3 +759,225 @@ func generateServerURL(serverIP string, nodePort int32) (string, error) { func SupportedStorageMode() []string { return []string{etcdStorageModeEmptyDir, etcdStorageModeHostPath, etcdStorageModePVC} } + +// parseEtcdNodeSelectorLabelsMap parse etcd node selector labels +func (i *CommandInitOption) parseEtcdNodeSelectorLabelsMap() error { + if i.EtcdNodeSelectorLabels == "" { + return nil + } + // Parse the label selector string into a LabelSelector object + selector, err := metav1.ParseToLabelSelector(i.EtcdNodeSelectorLabels) + if err != nil { + return fmt.Errorf("the etcdNodeSelector format is incorrect: %s", err) + } + // Convert the LabelSelector object into a map[string]string + labelMap, err := metav1.LabelSelectorAsMap(selector) + if err != nil { + return fmt.Errorf("failed to convert etcdNodeSelector labels to map: %v", err) + } + i.EtcdNodeSelectorLabelsMap = labelMap + return 
nil +} + +// parseInitConfig parses fields from KarmadaInitConfig into CommandInitOption. +// It is responsible for delegating the parsing of various configuration sections, +// such as certificates, etcd, and control plane components. +func (i *CommandInitOption) parseInitConfig(cfg *initConfig.KarmadaInitConfig) error { + spec := cfg.Spec + + i.parseGeneralConfig(spec) + i.parseCertificateConfig(spec.Certificates) + i.parseEtcdConfig(spec.Etcd) + i.parseControlPlaneConfig(spec.Components) + + setIfNotEmpty(&i.KarmadaDataPath, spec.KarmadaDataPath) + setIfNotEmpty(&i.KarmadaPkiPath, spec.KarmadaPKIPath) + setIfNotEmpty(&i.HostClusterDomain, spec.HostCluster.Domain) + setIfNotEmpty(&i.CRDs, spec.KarmadaCRDs) + + return nil +} + +// parseGeneralConfig parses basic configuration related to the host cluster, +// such as namespace, kubeconfig, and image settings from the KarmadaInitConfigSpec. +func (i *CommandInitOption) parseGeneralConfig(spec initConfig.KarmadaInitSpec) { + setIfNotEmpty(&i.KubeConfig, spec.HostCluster.Kubeconfig) + setIfNotEmpty(&i.KubeImageTag, spec.Images.KubeImageTag) + setIfNotEmpty(&i.KubeImageRegistry, spec.Images.KubeImageRegistry) + setIfNotEmpty(&i.KubeImageMirrorCountry, spec.Images.KubeImageMirrorCountry) + + if spec.Images.PrivateRegistry != nil { + setIfNotEmpty(&i.ImageRegistry, spec.Images.PrivateRegistry.Registry) + } + setIfNotEmpty(&i.ImagePullPolicy, string(spec.Images.ImagePullPolicy)) + setIfNotEmpty(&i.Context, spec.HostCluster.Context) + + if len(spec.Images.ImagePullSecrets) != 0 { + i.PullSecrets = spec.Images.ImagePullSecrets + } + setIfNotZero(&i.WaitComponentReadyTimeout, spec.WaitComponentReadyTimeout) +} + +// parseCertificateConfig parses certificate-related configuration, including CA files, +// external DNS, and external IP from the Certificates configuration block. +func (i *CommandInitOption) parseCertificateConfig(certificates initConfig.Certificates) { + setIfNotEmpty(&i.CaKeyFile, certificates.CAKeyFile) + setIfNotEmpty(&i.CaCertFile, certificates.CACertFile) + + if len(certificates.ExternalDNS) > 0 { + i.ExternalDNS = joinStringSlice(certificates.ExternalDNS) + } + + if len(certificates.ExternalIP) > 0 { + i.ExternalIP = joinStringSlice(certificates.ExternalIP) + } + + if certificates.ValidityPeriod.Duration != 0 { + i.CertValidity = certificates.ValidityPeriod.Duration + } +} + +// parseEtcdConfig handles the parsing of both local and external Etcd configurations. +func (i *CommandInitOption) parseEtcdConfig(etcd initConfig.Etcd) { + if etcd.Local != nil { + i.parseLocalEtcdConfig(etcd.Local) + } else if etcd.External != nil { + i.parseExternalEtcdConfig(etcd.External) + } +} + +// parseLocalEtcdConfig parses the local Etcd settings, including image information, +// data path, PVC size, and node selector labels. 
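+// For example (illustrative values), a local etcd spec with image {repository: "etcd",
+// tag: "3.5.9"} and nodeSelectorLabels {"key": "value"} sets EtcdImage to "etcd:3.5.9"
+// and flattens the labels into the EtcdNodeSelectorLabels string "key=value".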
+func (i *CommandInitOption) parseLocalEtcdConfig(localEtcd *initConfig.LocalEtcd) { + setIfNotEmpty(&i.EtcdImage, localEtcd.CommonSettings.Image.GetImage()) + setIfNotEmpty(&i.EtcdInitImage, localEtcd.InitImage.GetImage()) + setIfNotEmpty(&i.EtcdHostDataPath, localEtcd.DataPath) + setIfNotEmpty(&i.EtcdPersistentVolumeSize, localEtcd.PVCSize) + + if len(localEtcd.NodeSelectorLabels) != 0 { + i.EtcdNodeSelectorLabels = mapToString(localEtcd.NodeSelectorLabels) + } + + setIfNotEmpty(&i.EtcdStorageMode, localEtcd.StorageMode) + setIfNotEmpty(&i.StorageClassesName, localEtcd.StorageClassesName) + setIfNotZeroInt32(&i.EtcdReplicas, localEtcd.Replicas) +} + +// parseExternalEtcdConfig parses the external Etcd configuration, including CA file, +// client certificates, and endpoints. +func (i *CommandInitOption) parseExternalEtcdConfig(externalEtcd *initConfig.ExternalEtcd) { + setIfNotEmpty(&i.ExternalEtcdCACertPath, externalEtcd.CAFile) + setIfNotEmpty(&i.ExternalEtcdClientCertPath, externalEtcd.CertFile) + setIfNotEmpty(&i.ExternalEtcdClientKeyPath, externalEtcd.KeyFile) + + if len(externalEtcd.Endpoints) > 0 { + i.ExternalEtcdServers = strings.Join(externalEtcd.Endpoints, ",") + } + setIfNotEmpty(&i.ExternalEtcdKeyPrefix, externalEtcd.KeyPrefix) +} + +// parseControlPlaneConfig parses the configuration for various control plane components, +// including API Server, Controller Manager, Scheduler, and Webhook. +func (i *CommandInitOption) parseControlPlaneConfig(components initConfig.KarmadaComponents) { + i.parseKarmadaAPIServerConfig(components.KarmadaAPIServer) + i.parseKarmadaControllerManagerConfig(components.KarmadaControllerManager) + i.parseKarmadaSchedulerConfig(components.KarmadaScheduler) + i.parseKarmadaWebhookConfig(components.KarmadaWebhook) + i.parseKarmadaAggregatedAPIServerConfig(components.KarmadaAggregatedAPIServer) + i.parseKubeControllerManagerConfig(components.KubeControllerManager) +} + +// parseKarmadaAPIServerConfig parses the configuration for the Karmada API Server component, +// including image and replica settings, as well as advertise address. +func (i *CommandInitOption) parseKarmadaAPIServerConfig(apiServer *initConfig.KarmadaAPIServer) { + if apiServer != nil { + setIfNotZeroInt32(&i.KarmadaAPIServerNodePort, apiServer.Networking.Port) + setIfNotEmpty(&i.Namespace, apiServer.Networking.Namespace) + setIfNotEmpty(&i.KarmadaAPIServerImage, apiServer.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KarmadaAPIServerReplicas, apiServer.CommonSettings.Replicas) + setIfNotEmpty(&i.KarmadaAPIServerAdvertiseAddress, apiServer.AdvertiseAddress) + } +} + +// parseKarmadaControllerManagerConfig parses the configuration for the Karmada Controller Manager, +// including image and replica settings. +func (i *CommandInitOption) parseKarmadaControllerManagerConfig(manager *initConfig.KarmadaControllerManager) { + if manager != nil { + setIfNotEmpty(&i.KarmadaControllerManagerImage, manager.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KarmadaControllerManagerReplicas, manager.CommonSettings.Replicas) + } +} + +// parseKarmadaSchedulerConfig parses the configuration for the Karmada Scheduler, +// including image and replica settings. 
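+// For example (illustrative values), {image: {repository: "karmada-scheduler", tag: "v1.11.0"},
+// replicas: 2} sets KarmadaSchedulerImage to "karmada-scheduler:v1.11.0" and
+// KarmadaSchedulerReplicas to 2; empty or zero fields leave the existing option values unchanged.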
+func (i *CommandInitOption) parseKarmadaSchedulerConfig(scheduler *initConfig.KarmadaScheduler) { + if scheduler != nil { + setIfNotEmpty(&i.KarmadaSchedulerImage, scheduler.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KarmadaSchedulerReplicas, scheduler.CommonSettings.Replicas) + } +} + +// parseKarmadaWebhookConfig parses the configuration for the Karmada Webhook, +// including image and replica settings. +func (i *CommandInitOption) parseKarmadaWebhookConfig(webhook *initConfig.KarmadaWebhook) { + if webhook != nil { + setIfNotEmpty(&i.KarmadaWebhookImage, webhook.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KarmadaWebhookReplicas, webhook.CommonSettings.Replicas) + } +} + +// parseKarmadaAggregatedAPIServerConfig parses the configuration for the Karmada Aggregated API Server, +// including image and replica settings. +func (i *CommandInitOption) parseKarmadaAggregatedAPIServerConfig(aggregatedAPIServer *initConfig.KarmadaAggregatedAPIServer) { + if aggregatedAPIServer != nil { + setIfNotEmpty(&i.KarmadaAggregatedAPIServerImage, aggregatedAPIServer.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KarmadaAggregatedAPIServerReplicas, aggregatedAPIServer.CommonSettings.Replicas) + } +} + +// parseKubeControllerManagerConfig parses the configuration for the Kube Controller Manager, +// including image and replica settings. +func (i *CommandInitOption) parseKubeControllerManagerConfig(manager *initConfig.KubeControllerManager) { + if manager != nil { + setIfNotEmpty(&i.KubeControllerManagerImage, manager.CommonSettings.Image.GetImage()) + setIfNotZeroInt32(&i.KubeControllerManagerReplicas, manager.CommonSettings.Replicas) + } +} + +// mapToString converts a map to a comma-separated key=value string. +func mapToString(m map[string]string) string { + var builder strings.Builder + for k, v := range m { + if builder.Len() > 0 { + builder.WriteString(",") + } + builder.WriteString(fmt.Sprintf("%s=%s", k, v)) + } + return builder.String() +} + +// setIfNotEmpty checks if the source string is not empty, and if so, assigns its value to the destination string. +func setIfNotEmpty(dest *string, src string) { + if src != "" { + *dest = src + } +} + +// setIfNotZero checks if the source integer is not zero, and if so, assigns its value to the destination integer. +func setIfNotZero(dest *int, src int) { + if src != 0 { + *dest = src + } +} + +// setIfNotZeroInt32 checks if the source int32 is not zero, and if so, assigns its value to the destination int32. +func setIfNotZeroInt32(dest *int32, src int32) { + if src != 0 { + *dest = src + } +} + +// joinStringSlice joins a slice of strings into a single string separated by commas. 
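+// For example, joinStringSlice([]string{"dns1", "dns2"}) returns "dns1,dns2".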
+func joinStringSlice(slice []string) string { + return strings.Join(slice, ",") +} diff --git a/pkg/karmadactl/cmdinit/kubernetes/deploy_test.go b/pkg/karmadactl/cmdinit/kubernetes/deploy_test.go index 5523a1f475a7..dba8ccb1584c 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/deploy_test.go +++ b/pkg/karmadactl/cmdinit/kubernetes/deploy_test.go @@ -20,13 +20,16 @@ import ( "context" "net" "os" + "reflect" "testing" "time" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/fake" + "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/config" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/utils" ) @@ -496,3 +499,237 @@ func TestKarmadaSchedulerImage(t *testing.T) { }) } } + +func TestCommandInitOption_parseEtcdNodeSelectorLabelsMap(t *testing.T) { + tests := []struct { + name string + opt CommandInitOption + wantErr bool + expected map[string]string + }{ + { + name: "Valid labels", + opt: CommandInitOption{ + EtcdNodeSelectorLabels: "kubernetes.io/os=linux,hello=world", + }, + wantErr: false, + expected: map[string]string{ + "kubernetes.io/os": "linux", + "hello": "world", + }, + }, + { + name: "Invalid labels without equal sign", + opt: CommandInitOption{ + EtcdNodeSelectorLabels: "invalidlabel", + }, + wantErr: true, + expected: nil, + }, + { + name: "Labels with extra spaces", + opt: CommandInitOption{ + EtcdNodeSelectorLabels: " key1 = value1 , key2=value2 ", + }, + wantErr: false, + expected: map[string]string{ + "key1": "value1", + "key2": "value2", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.opt.parseEtcdNodeSelectorLabelsMap() + if (err != nil) != tt.wantErr { + t.Errorf("parseEtcdNodeSelectorLabelsMap() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !tt.wantErr && !reflect.DeepEqual(tt.opt.EtcdNodeSelectorLabelsMap, tt.expected) { + t.Errorf("parseEtcdNodeSelectorLabelsMap() = %v, want %v", tt.opt.EtcdNodeSelectorLabelsMap, tt.expected) + } + }) + } +} + +func TestParseInitConfig(t *testing.T) { + cfg := &config.KarmadaInitConfig{ + Spec: config.KarmadaInitSpec{ + WaitComponentReadyTimeout: 200, + KarmadaDataPath: "/etc/karmada", + KarmadaPKIPath: "/etc/karmada/pki", + KarmadaCRDs: "https://github.com/karmada-io/karmada/releases/download/test/crds.tar.gz", + Certificates: config.Certificates{ + CACertFile: "/path/to/ca.crt", + CAKeyFile: "/path/to/ca.key", + ExternalDNS: []string{"dns1", "dns2"}, + ExternalIP: []string{"1.2.3.4", "5.6.7.8"}, + ValidityPeriod: metav1.Duration{Duration: parseDuration("8760h")}, + }, + Etcd: config.Etcd{ + Local: &config.LocalEtcd{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "etcd-image", + Tag: "latest", + }, + Replicas: 3, + }, + InitImage: config.Image{ + Repository: "init-image", + Tag: "latest", + }, + DataPath: "/data/dir", + PVCSize: "5Gi", + NodeSelectorLabels: map[string]string{ + "key": "value", + }, + StorageClassesName: "fast", + StorageMode: "PVC", + }, + External: &config.ExternalEtcd{ + CAFile: "/etc/ssl/certs/ca-certificates.crt", + CertFile: "/path/to/certificate.pem", + KeyFile: "/path/to/privatekey.pem", + Endpoints: []string{"https://example.com:8443"}, + KeyPrefix: "ext-", + }, + }, + HostCluster: config.HostCluster{ + Kubeconfig: "/path/to/kubeconfig", + Context: "test-context", + Domain: "cluster.local", + }, + Images: config.Images{ + KubeImageTag: "v1.21.0", + KubeImageRegistry: "registry", + KubeImageMirrorCountry: "cn", + 
ImagePullPolicy: corev1.PullIfNotPresent, + ImagePullSecrets: []string{"secret1", "secret2"}, + PrivateRegistry: &config.ImageRegistry{ + Registry: "test-registry", + }, + }, + Components: config.KarmadaComponents{ + KarmadaAPIServer: &config.KarmadaAPIServer{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "apiserver-image", + Tag: "latest", + }, + Replicas: 2, + }, + AdvertiseAddress: "192.168.1.1", + Networking: config.Networking{ + Namespace: "test-namespace", + Port: 32443, + }, + }, + KarmadaControllerManager: &config.KarmadaControllerManager{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "controller-manager-image", + Tag: "latest", + }, + Replicas: 2, + }, + }, + KarmadaScheduler: &config.KarmadaScheduler{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "scheduler-image", + Tag: "latest", + }, + Replicas: 2, + }, + }, + KarmadaWebhook: &config.KarmadaWebhook{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "webhook-image", + Tag: "latest", + }, + Replicas: 2, + }, + }, + KarmadaAggregatedAPIServer: &config.KarmadaAggregatedAPIServer{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "aggregated-apiserver-image", + Tag: "latest", + }, + Replicas: 2, + }, + }, + KubeControllerManager: &config.KubeControllerManager{ + CommonSettings: config.CommonSettings{ + Image: config.Image{ + Repository: "kube-controller-manager-image", + Tag: "latest", + }, + Replicas: 2, + }, + }, + }, + }, + } + + opt := &CommandInitOption{} + err := opt.parseInitConfig(cfg) + assert.NoError(t, err) + assert.Equal(t, "test-namespace", opt.Namespace) + assert.Equal(t, "/path/to/kubeconfig", opt.KubeConfig) + assert.Equal(t, "test-registry", opt.ImageRegistry) + assert.Equal(t, 200, opt.WaitComponentReadyTimeout) + assert.Equal(t, "dns1,dns2", opt.ExternalDNS) + assert.Equal(t, "1.2.3.4,5.6.7.8", opt.ExternalIP) + assert.Equal(t, parseDuration("8760h"), opt.CertValidity) + assert.Equal(t, "etcd-image:latest", opt.EtcdImage) + assert.Equal(t, "init-image:latest", opt.EtcdInitImage) + assert.Equal(t, "/data/dir", opt.EtcdHostDataPath) + assert.Equal(t, "5Gi", opt.EtcdPersistentVolumeSize) + assert.Equal(t, "key=value", opt.EtcdNodeSelectorLabels) + assert.Equal(t, "fast", opt.StorageClassesName) + assert.Equal(t, "PVC", opt.EtcdStorageMode) + assert.Equal(t, int32(3), opt.EtcdReplicas) + assert.Equal(t, "apiserver-image:latest", opt.KarmadaAPIServerImage) + assert.Equal(t, "192.168.1.1", opt.KarmadaAPIServerAdvertiseAddress) + assert.Equal(t, int32(2), opt.KarmadaAPIServerReplicas) + assert.Equal(t, "registry", opt.KubeImageRegistry) + assert.Equal(t, "cn", opt.KubeImageMirrorCountry) + assert.Equal(t, "IfNotPresent", opt.ImagePullPolicy) + assert.Equal(t, []string{"secret1", "secret2"}, opt.PullSecrets) + assert.Equal(t, "https://github.com/karmada-io/karmada/releases/download/test/crds.tar.gz", opt.CRDs) +} + +func TestParseInitConfig_MissingFields(t *testing.T) { + cfg := &config.KarmadaInitConfig{ + Spec: config.KarmadaInitSpec{ + Components: config.KarmadaComponents{ + KarmadaAPIServer: &config.KarmadaAPIServer{ + Networking: config.Networking{ + Namespace: "test-namespace", + }, + }, + }, + }, + } + + opt := &CommandInitOption{} + err := opt.parseInitConfig(cfg) + assert.NoError(t, err) + assert.Equal(t, "test-namespace", opt.Namespace) + assert.Empty(t, opt.KubeConfig) + assert.Empty(t, opt.KubeImageTag) +} + +// parseDuration parses a duration string and 
returns the corresponding time.Duration value. +// If the parsing fails, it returns a duration of 0. +func parseDuration(durationStr string) time.Duration { + duration, err := time.ParseDuration(durationStr) + if err != nil { + return 0 + } + return duration +} diff --git a/pkg/karmadactl/cmdinit/kubernetes/deployments.go b/pkg/karmadactl/cmdinit/kubernetes/deployments.go index 3c112b5c28b9..835ec04c263a 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/deployments.go +++ b/pkg/karmadactl/cmdinit/kubernetes/deployments.go @@ -92,9 +92,6 @@ func (i *CommandInitOption) karmadaAPIServerContainerCommand() []string { fmt.Sprintf("--etcd-keyfile=%s/%s.key", karmadaCertsVolumeMountPath, options.EtcdClientCertAndKeyName), fmt.Sprintf("--etcd-servers=%s", etcdServers), "--bind-address=0.0.0.0", - fmt.Sprintf("--kubelet-client-certificate=%s/%s.crt", karmadaCertsVolumeMountPath, options.KarmadaCertAndKeyName), - fmt.Sprintf("--kubelet-client-key=%s/%s.key", karmadaCertsVolumeMountPath, options.KarmadaCertAndKeyName), - "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname", "--disable-admission-plugins=StorageObjectInUseProtection,ServiceAccount", "--runtime-config=", fmt.Sprintf("--apiserver-count=%v", i.KarmadaAPIServerReplicas), @@ -452,8 +449,8 @@ func (i *CommandInitOption) makeKarmadaSchedulerDeployment() *appsv1.Deployment Command: []string{ "/bin/karmada-scheduler", "--kubeconfig=/etc/kubeconfig", - "--bind-address=0.0.0.0", - "--secure-port=10351", + "--metrics-bind-address=0.0.0.0:8080", + "--health-probe-bind-address=0.0.0.0:10351", "--enable-scheduler-estimator=true", "--leader-elect=true", "--scheduler-estimator-ca-file=/etc/karmada/pki/ca.crt", @@ -466,7 +463,7 @@ func (i *CommandInitOption) makeKarmadaSchedulerDeployment() *appsv1.Deployment Ports: []corev1.ContainerPort{ { Name: metricsPortName, - ContainerPort: 10351, + ContainerPort: 8080, Protocol: corev1.ProtocolTCP, }, }, @@ -591,10 +588,9 @@ func (i *CommandInitOption) makeKarmadaControllerManagerDeployment() *appsv1.Dep Command: []string{ "/bin/karmada-controller-manager", "--kubeconfig=/etc/kubeconfig", - "--bind-address=0.0.0.0", "--metrics-bind-address=:8080", + "--health-probe-bind-address=0.0.0.0:10357", "--cluster-status-update-frequency=10s", - "--secure-port=10357", fmt.Sprintf("--leader-elect-resource-namespace=%s", i.Namespace), "--v=4", }, diff --git a/pkg/karmadactl/cmdinit/kubernetes/statefulset.go b/pkg/karmadactl/cmdinit/kubernetes/statefulset.go index 77168712dabf..e74b79328879 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/statefulset.go +++ b/pkg/karmadactl/cmdinit/kubernetes/statefulset.go @@ -28,7 +28,6 @@ import ( "k8s.io/utils/ptr" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/options" - "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit/utils" ) const ( @@ -300,11 +299,12 @@ func (i *CommandInitOption) makeETCDStatefulSet() *appsv1.StatefulSet { Volumes: *Volumes, } - if i.EtcdStorageMode == "hostPath" && i.EtcdNodeSelectorLabels != "" { - podSpec.NodeSelector = utils.StringToMap(i.EtcdNodeSelectorLabels) - } - if i.EtcdStorageMode == "hostPath" && i.EtcdNodeSelectorLabels == "" { - podSpec.NodeSelector = map[string]string{"karmada.io/etcd": ""} + if i.EtcdStorageMode == "hostPath" { + if i.EtcdNodeSelectorLabelsMap != nil { + podSpec.NodeSelector = i.EtcdNodeSelectorLabelsMap + } else { + podSpec.NodeSelector = map[string]string{"karmada.io/etcd": ""} + } } // InitContainers diff --git a/pkg/karmadactl/cmdinit/kubernetes/statefulset_test.go 
b/pkg/karmadactl/cmdinit/kubernetes/statefulset_test.go index ba5088edfed6..5cfa3e1b6a46 100644 --- a/pkg/karmadactl/cmdinit/kubernetes/statefulset_test.go +++ b/pkg/karmadactl/cmdinit/kubernetes/statefulset_test.go @@ -78,43 +78,55 @@ func TestCommandInitIOption_etcdInitContainerCommand(t *testing.T) { func TestCommandInitIOption_makeETCDStatefulSet(t *testing.T) { tests := []struct { - name string - opt CommandInitOption - expectedNSValue string - expectedNSLabel string + name string + opt CommandInitOption + expectedNSMap map[string]string }{ { - name: "EtcdStorageMode is etcdStorageModeHostPath, EtcdNodeSelectorLabels is set", + name: "EtcdStorageMode is etcdStorageModeHostPath, single valid EtcdNodeSelectorLabel", opt: CommandInitOption{ EtcdStorageMode: etcdStorageModeHostPath, Namespace: "karmada", StorageClassesName: "StorageClassesName", EtcdPersistentVolumeSize: "1024", EtcdNodeSelectorLabels: "label=value", + EtcdNodeSelectorLabelsMap: map[string]string{ + "label": "value", + }, + }, + expectedNSMap: map[string]string{ + "label": "value", }, - expectedNSValue: "value", - expectedNSLabel: "label", }, { - name: "EtcdStorageMode is etcdStorageModeHostPath, EtcdNodeSelectorLabels is not set", + name: "EtcdStorageMode is etcdStorageModeHostPath, multiple valid EtcdNodeSelectorLabels", opt: CommandInitOption{ EtcdStorageMode: etcdStorageModeHostPath, Namespace: "karmada", StorageClassesName: "StorageClassesName", EtcdPersistentVolumeSize: "1024", - EtcdNodeSelectorLabels: "", + EtcdNodeSelectorLabels: "label1=value1,label2=value2,kubernetes.io/os=linux", + EtcdNodeSelectorLabelsMap: map[string]string{ + "label1": "value1", + "label2": "value2", + "kubernetes.io/os": "linux", + }, + }, + expectedNSMap: map[string]string{ + "label1": "value1", + "label2": "value2", + "kubernetes.io/os": "linux", }, - expectedNSValue: "", - expectedNSLabel: "karmada.io/etcd", }, { name: "EtcdStorageMode is etcdStorageModePVC", opt: CommandInitOption{ - EtcdStorageMode: etcdStorageModePVC, - Namespace: "karmada", - StorageClassesName: "StorageClassesName", - EtcdPersistentVolumeSize: "1024", - EtcdNodeSelectorLabels: "", + EtcdStorageMode: etcdStorageModePVC, + Namespace: "karmada", + StorageClassesName: "StorageClassesName", + EtcdPersistentVolumeSize: "1024", + EtcdNodeSelectorLabels: "", + EtcdNodeSelectorLabelsMap: map[string]string{}, }, }, } @@ -128,8 +140,10 @@ func TestCommandInitIOption_makeETCDStatefulSet(t *testing.T) { } } else { nodeSelector := etcd.Spec.Template.Spec.NodeSelector - if val, ok := nodeSelector[tt.expectedNSLabel]; !ok || val != tt.expectedNSValue { - t.Errorf("CommandInitOption.makeETCDStatefulSet() returns wrong nodeSelector %v", nodeSelector) + for label, value := range tt.expectedNSMap { + if val, ok := nodeSelector[label]; !ok || val != value { + t.Errorf("CommandInitOption.makeETCDStatefulSet() returns wrong nodeSelector %v, expected %v=%v", nodeSelector, label, value) + } } if len(etcd.Spec.VolumeClaimTemplates) != 0 { diff --git a/pkg/karmadactl/cmdinit/utils/examples.go b/pkg/karmadactl/cmdinit/utils/examples.go index 3dec2cb6d1b7..486b92500b92 100644 --- a/pkg/karmadactl/cmdinit/utils/examples.go +++ b/pkg/karmadactl/cmdinit/utils/examples.go @@ -140,6 +140,8 @@ spec: - /bin/karmada-scheduler-estimator - --kubeconfig=/etc/{{member_cluster_name}}-kubeconfig - --cluster-name={{member_cluster_name}} + - --metrics-bind-address=0.0.0.0:8080 + - --health-probe-bind-address=0.0.0.0:10351 volumeMounts: - name: member-kubeconfig subPath: {{member_cluster_name}}-kubeconfig diff 
--git a/pkg/karmadactl/completion/completion.go b/pkg/karmadactl/completion/completion.go new file mode 100644 index 000000000000..92c4d4171bee --- /dev/null +++ b/pkg/karmadactl/completion/completion.go @@ -0,0 +1,143 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package completion + +import ( + "fmt" + "io" + + "github.com/spf13/cobra" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +const defaultBoilerPlate = ` +# Copyright 2024 The Karmada Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +` + +var ( + completionLong = templates.LongDesc(i18n.T(` + Output shell completion code for the specified shell (bash, zsh). + The shell code must be evaluated to provide interactive + completion of kubectl commands. This can be done by sourcing it from + the .bash_profile. + + Note for zsh users: zsh completions are only supported in versions of zsh >= 5.2.`)) + + completionExample = templates.Examples(i18n.T(` + # Installing bash completion on Linux + ## If bash-completion is not installed on Linux, install the 'bash-completion' package + 1. apt-get install bash-completion + 2. source /usr/share/bash-completion/bash_completion + ## Load the %[1]s completion code for bash into the current shell + source <(%[1]s completion bash) + ## Or, write bash completion code to a file and source it from .bash_profile + 1. %[1]s completion bash > ~/.kube/completion.bash.inc + 2. echo "source '$HOME/.kube/completion.bash.inc'" >> $HOME/.bash_profile + 3. source $HOME/.bash_profile + + # Load the %[1]s completion code for zsh into the current shell + source <(%[1]s completion zsh) + # Set the %[1]s completion code for zsh to autoload on startup + %[1]s completion zsh > "${fpath[1]}/%[1]s"`)) +) + +var ( + // TODO: support output shell completion code for more specified shell, like `fish` and `powershell`. 
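+	// For example (illustrative), `karmadactl completion bash` prints bash completion
+	// code to stdout, while a shell name outside this map is rejected by RunCompletion
+	// with a usage error.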
+ completionShells = map[string]func(out io.Writer, boilerPlate string, cmd *cobra.Command) error{ + "bash": runCompletionBash, + "zsh": runCompletionZsh, + } +) + +// NewCmdCompletion creates the `completion` command +func NewCmdCompletion(parentCommand string, out io.Writer, boilerPlate string) *cobra.Command { + var shells []string + for s := range completionShells { + shells = append(shells, s) + } + + cmd := &cobra.Command{ + Use: "completion SHELL", + DisableFlagsInUseLine: true, + Short: "Output shell completion code for the specified shell (bash, zsh)", + Long: completionLong, + Example: fmt.Sprintf(completionExample, parentCommand), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(RunCompletion(out, boilerPlate, cmd, args)) + }, + ValidArgs: shells, + } + + return cmd +} + +// RunCompletion checks given arguments and executes command +func RunCompletion(out io.Writer, boilerPlate string, cmd *cobra.Command, args []string) error { + if len(args) == 0 { + return cmdutil.UsageErrorf(cmd, "Shell not specified.") + } + if len(args) > 1 { + return cmdutil.UsageErrorf(cmd, "Too many arguments. Expected only the shell type.") + } + run, found := completionShells[args[0]] + if !found { + return cmdutil.UsageErrorf(cmd, "Unsupported shell type %q.", args[0]) + } + + return run(out, boilerPlate, cmd.Parent()) +} + +func runCompletionBash(out io.Writer, boilerPlate string, cmd *cobra.Command) error { + if len(boilerPlate) == 0 { + boilerPlate = defaultBoilerPlate + } + if _, err := out.Write([]byte(boilerPlate)); err != nil { + return err + } + + return cmd.GenBashCompletionV2(out, true) +} + +func runCompletionZsh(out io.Writer, boilerPlate string, cmd *cobra.Command) error { + zshHead := fmt.Sprintf("#compdef %[1]s\ncompdef _%[1]s %[1]s\n", cmd.Name()) + if _, err := out.Write([]byte(zshHead)); err != nil { + return err + } + + if len(boilerPlate) == 0 { + boilerPlate = defaultBoilerPlate + } + if _, err := out.Write([]byte(boilerPlate)); err != nil { + return err + } + + return cmd.GenZshCompletion(out) +} diff --git a/pkg/karmadactl/cordon/cordon.go b/pkg/karmadactl/cordon/cordon.go index 7bc5cf7a8d28..955b3b04496b 100644 --- a/pkg/karmadactl/cordon/cordon.go +++ b/pkg/karmadactl/cordon/cordon.go @@ -33,6 +33,7 @@ import ( karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) var ( @@ -61,6 +62,7 @@ const ( // NewCmdCordon defines the `cordon` command that mark cluster as unschedulable. 
func NewCmdCordon(f util.Factory, parentCommand string) *cobra.Command { opts := CommandCordonOption{} + cmd := &cobra.Command{ Use: "cordon CLUSTER", Short: "Mark cluster as unschedulable", @@ -68,6 +70,7 @@ func NewCmdCordon(f util.Factory, parentCommand string) *cobra.Command { Example: fmt.Sprintf(cordonExample, parentCommand), SilenceUsage: true, DisableFlagsInUseLine: true, + ValidArgsFunction: utilcomp.SpecifiedResourceTypeAndNameCompletionFunc(f, []string{"cluster"}), RunE: func(_ *cobra.Command, args []string) error { if err := opts.Complete(args); err != nil { return err @@ -86,12 +89,14 @@ func NewCmdCordon(f util.Factory, parentCommand string) *cobra.Command { options.AddKubeConfigFlags(flags) flags.BoolVar(&opts.DryRun, "dry-run", false, "Run the command in dry-run mode, without making any server requests.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } // NewCmdUncordon defines the `uncordon` command that mark cluster as schedulable. func NewCmdUncordon(f util.Factory, parentCommand string) *cobra.Command { opts := CommandCordonOption{} + cmd := &cobra.Command{ Use: "uncordon CLUSTER", Short: "Mark cluster as schedulable", @@ -99,6 +104,7 @@ func NewCmdUncordon(f util.Factory, parentCommand string) *cobra.Command { Example: fmt.Sprintf(uncordonExample, parentCommand), SilenceUsage: true, DisableFlagsInUseLine: true, + ValidArgsFunction: utilcomp.SpecifiedResourceTypeAndNameCompletionFunc(f, []string{"cluster"}), RunE: func(_ *cobra.Command, args []string) error { if err := opts.Complete(args); err != nil { return err @@ -117,6 +123,7 @@ func NewCmdUncordon(f util.Factory, parentCommand string) *cobra.Command { options.AddKubeConfigFlags(flags) flags.BoolVar(&opts.DryRun, "dry-run", false, "Run the command in dry-run mode, without making any server requests.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } diff --git a/pkg/karmadactl/create/create.go b/pkg/karmadactl/create/create.go new file mode 100644 index 000000000000..ed34d3becd99 --- /dev/null +++ b/pkg/karmadactl/create/create.go @@ -0,0 +1,64 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlcreate "k8s.io/kubectl/pkg/cmd/create" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + createLong = templates.LongDesc(` + Create a resource from a file or from stdin. 
+ + JSON and YAML formats are accepted.`) + + createExample = templates.Examples(` + # Create a pod using the data in pod.json + %[1]s create -f ./pod.json + + # Create a pod based on the JSON passed into stdin + cat pod.json | %[1]s create -f - + + # Edit the data in registry.yaml in JSON then create the resource using the edited data + %[1]s create -f registry.yaml --edit -o json`) +) + +// NewCmdCreate returns new initialized instance of create sub command +func NewCmdCreate(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := kubectlcreate.NewCmdCreate(f, ioStreams) + cmd.Long = fmt.Sprintf(createLong, parentCommand) + cmd.Example = fmt.Sprintf(createExample, parentCommand) + cmd.Annotations = map[string]string{ + util.TagCommandGroup: util.GroupBasic, + } + options.AddKubeConfigFlags(cmd.PersistentFlags()) + options.AddNamespaceFlag(cmd.PersistentFlags()) + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + + return cmd +} diff --git a/pkg/karmadactl/delete/delete.go b/pkg/karmadactl/delete/delete.go new file mode 100644 index 000000000000..79179c4929e0 --- /dev/null +++ b/pkg/karmadactl/delete/delete.go @@ -0,0 +1,106 @@ +/* + Copyright 2024 The Karmada Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package delete + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectldelete "k8s.io/kubectl/pkg/cmd/delete" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + deleteLong = templates.LongDesc(` + Delete resources by file names, stdin, resources and names, or by resources and label selector. + + JSON and YAML formats are accepted. Only one type of argument may be specified: file names, + resources and names, or resources and label selector. + + Some resources, such as pods, support graceful deletion. These resources define a default period + before they are forcibly terminated (the grace period) but you may override that value with + the --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often + represent entities in the cluster, deletion may not be acknowledged immediately. If the node + hosting a pod is down or cannot reach the API server, termination may take significantly longer + than the grace period. To force delete a resource, you must specify the --force flag. + Note: only a subset of resources support graceful deletion. In absence of the support, + the --grace-period flag is ignored. + + IMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been + terminated, which can leave those processes running until the node detects the deletion and + completes graceful deletion. 
diff --git a/pkg/karmadactl/delete/delete.go b/pkg/karmadactl/delete/delete.go new file mode 100644 index 000000000000..79179c4929e0 --- /dev/null +++ b/pkg/karmadactl/delete/delete.go @@ -0,0 +1,106 @@ +/* + Copyright 2024 The Karmada Authors. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package delete + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectldelete "k8s.io/kubectl/pkg/cmd/delete" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + deleteLong = templates.LongDesc(` + Delete resources by file names, stdin, resources and names, or by resources and label selector. + + JSON and YAML formats are accepted. Only one type of argument may be specified: file names, + resources and names, or resources and label selector. + + Some resources, such as pods, support graceful deletion. These resources define a default period + before they are forcibly terminated (the grace period) but you may override that value with + the --grace-period flag, or pass --now to set a grace-period of 1. Because these resources often + represent entities in the cluster, deletion may not be acknowledged immediately. If the node + hosting a pod is down or cannot reach the API server, termination may take significantly longer + than the grace period. To force delete a resource, you must specify the --force flag. + Note: only a subset of resources support graceful deletion. In absence of the support, + the --grace-period flag is ignored. + + IMPORTANT: Force deleting pods does not wait for confirmation that the pod's processes have been + terminated, which can leave those processes running until the node detects the deletion and + completes graceful deletion. If your processes use shared storage or talk to a remote API and + depend on the name of the pod to identify themselves, force deleting those pods may result in + multiple processes running on different machines using the same identification which may lead + to data corruption or inconsistency. Only force delete pods when you are sure the pod is + terminated, or if your application can tolerate multiple copies of the same pod running at once. + Also, if you force delete pods, the scheduler may place new pods on those nodes before the node + has released those resources, causing those pods to be evicted immediately. + + Note that the delete command does NOT do resource version checks, so if someone submits an + update to a resource right when you submit a delete, their update will be lost along with the + rest of the resource. + + After a CustomResourceDefinition is deleted, invalidation of discovery cache may take up + to 6 hours. If you don't want to wait, you might want to run "%[1]s api-resources" to refresh + the discovery cache.`) + + deleteExample = templates.Examples(` + # Delete a propagationpolicy using the type and name specified in propagationpolicy.json + %[1]s delete -f ./propagationpolicy.json + + # Delete resources from a directory containing kustomization.yaml - e.g. dir/kustomization.yaml + %[1]s delete -k dir + + # Delete resources from all files that end with '.json' + %[1]s delete -f '*.json' + + # Delete a propagationpolicy based on the type and name in the JSON passed into stdin + cat propagationpolicy.json | %[1]s delete -f - + + # Delete propagationpolicies and services with the same names "baz" and "foo" + %[1]s delete propagationpolicy,service baz foo + + # Delete propagationpolicies and services with label name=myLabel + %[1]s delete propagationpolicies,services -l name=myLabel + + # Delete a propagationpolicy with minimal delay + %[1]s delete propagationpolicy foo --now + + # Force delete a propagationpolicy + %[1]s delete propagationpolicy foo --force + + # Delete all propagationpolicies + %[1]s delete propagationpolicies --all`) +) + +// NewCmdDelete returns new initialized instance of delete sub command +func NewCmdDelete(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := kubectldelete.NewCmdDelete(f, ioStreams) + cmd.Long = fmt.Sprintf(deleteLong, parentCommand) + cmd.Example = fmt.Sprintf(deleteExample, parentCommand) + cmd.Annotations = map[string]string{ + util.TagCommandGroup: util.GroupBasic, + } + options.AddKubeConfigFlags(cmd.Flags()) + options.AddNamespaceFlag(cmd.Flags()) + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + return cmd +} diff --git a/pkg/karmadactl/describe/describe.go b/pkg/karmadactl/describe/describe.go index eefe9ab92c47..b2c2ce3788b3 100644 --- a/pkg/karmadactl/describe/describe.go +++ b/pkg/karmadactl/describe/describe.go @@ -26,6 +26,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) var ( @@ -74,6 +75,7 @@ func NewCmdDescribe(f util.Factory, parentCommand string, streams genericiooptio Long: fmt.Sprintf(describeLong, parentCommand), SilenceUsage: true, DisableFlagsInUseLine: true, + ValidArgsFunction: utilcomp.ResourceTypeAndNameCompletionFunc(f), Example: fmt.Sprintf(describeExample, parentCommand), RunE: func(_ *cobra.Command, args []string) error
{ if err := o.Complete(f, args, kubedescribeFlags, parentCommand); err != nil { @@ -96,11 +98,15 @@ func NewCmdDescribe(f util.Factory, parentCommand string, streams genericiooptio kubedescribeFlags.AddFlags(cmd) options.AddKubeConfigFlags(flags) + options.AddNamespaceFlag(flags) o.OperationScope = options.KarmadaControlPlane - flags.Var(&o.OperationScope, "operation-scope", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") - flags.StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + flags.VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") flags.StringVarP(&o.Cluster, "cluster", "C", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) return cmd } diff --git a/pkg/karmadactl/edit/edit.go b/pkg/karmadactl/edit/edit.go new file mode 100644 index 000000000000..56ec6fd64c1d --- /dev/null +++ b/pkg/karmadactl/edit/edit.go @@ -0,0 +1,64 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package edit + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectledit "k8s.io/kubectl/pkg/cmd/edit" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + editExample = templates.Examples(i18n.T(` + # Edit the service named 'registry' + %[1]s edit svc/registry + + # Use an alternative editor + KUBE_EDITOR="nano" %[1]s edit svc/registry + + # Edit the job 'myjob' in JSON using the v1 API format + %[1]s edit job.v1.batch/myjob -o json + + # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation + %[1]s edit deployment/mydeployment -o yaml --save-config + + # Edit the 'status' subresource for the 'mydeployment' deployment + %[1]s edit deployment mydeployment --subresource='status'`)) +) + +// NewCmdEdit returns new initialized instance of edit sub command +func NewCmdEdit(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := kubectledit.NewCmdEdit(f, ioStreams) + cmd.Example = fmt.Sprintf(editExample, parentCommand) + cmd.Annotations = map[string]string{ + util.TagCommandGroup: util.GroupBasic, + } + options.AddKubeConfigFlags(cmd.Flags()) + options.AddNamespaceFlag(cmd.Flags()) + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + return cmd +} diff --git a/pkg/karmadactl/exec/exec.go b/pkg/karmadactl/exec/exec.go index 0df1be5d62d6..3cd9624b89b6 100644 --- a/pkg/karmadactl/exec/exec.go +++ b/pkg/karmadactl/exec/exec.go @@ -28,6 +28,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) const ( @@ -80,6 +81,7 @@ func NewCmdExec(f util.Factory, parentCommand string, streams genericiooptions.I SilenceUsage: true, DisableFlagsInUseLine: true, Example: fmt.Sprintf(execExample, parentCommand), + ValidArgsFunction: utilcomp.PodResourceNameCompletionFunc(f), RunE: func(cmd *cobra.Command, args []string) error { argsLenAtDash := cmd.ArgsLenAtDash() if err := o.Complete(f, cmd, args, argsLenAtDash); err != nil { @@ -101,7 +103,7 @@ func NewCmdExec(f util.Factory, parentCommand string, streams genericiooptions.I o.OperationScope = options.KarmadaControlPlane flags := cmd.Flags() options.AddKubeConfigFlags(flags) - flags.StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + options.AddNamespaceFlag(flags) cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodExecTimeout) cmdutil.AddJsonFilenameFlag(flags, &o.KubectlExecOptions.FilenameOptions.Filenames, "to use to exec into the resource") cmdutil.AddContainerVarFlags(cmd, &o.KubectlExecOptions.ContainerName, o.KubectlExecOptions.ContainerName) @@ -109,8 +111,15 @@ func NewCmdExec(f util.Factory, parentCommand string, streams genericiooptions.I flags.BoolVarP(&o.KubectlExecOptions.Stdin, "stdin", "i", o.KubectlExecOptions.Stdin, "Pass stdin to the container") flags.BoolVarP(&o.KubectlExecOptions.TTY, "tty", "t", o.KubectlExecOptions.TTY, "Stdin is a TTY") flags.BoolVarP(&o.KubectlExecOptions.Quiet, "quiet", "q", o.KubectlExecOptions.Quiet, "Only print output from the remote session") - 
flags.Var(&o.OperationScope, "operation-scope", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") + flags.VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") flags.StringVar(&o.Cluster, "cluster", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", utilcomp.ContainerCompletionFunc(f))) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) + return cmd } diff --git a/pkg/karmadactl/explain/explain.go b/pkg/karmadactl/explain/explain.go new file mode 100644 index 000000000000..3f2848a4fc9e --- /dev/null +++ b/pkg/karmadactl/explain/explain.go @@ -0,0 +1,139 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlexplain "k8s.io/kubectl/pkg/cmd/explain" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + explainLong = templates.LongDesc(` + Describe fields and structure of various resources in Karmada control plane or a member cluster. + + This command describes the fields associated with each supported API resource. + Fields are identified via a simple JSONPath identifier: + + <type>.<fieldName>[.<fieldName>] + + Information about each field is retrieved from the server in OpenAPI format.`) + + explainExamples = templates.Examples(` + # Get the documentation of the resource and its fields in Karmada control plane + %[1]s explain propagationpolicies + + # Get all the fields in the resource in member cluster member1 + %[1]s explain pods --recursive --operation-scope=members --cluster=member1 + + # Get the explanation for resourcebindings in supported api versions in Karmada control plane + %[1]s explain resourcebindings --api-version=work.karmada.io/v1alpha1 + + # Get the documentation of a specific field of a resource in member cluster member1 + %[1]s explain pods.spec.containers --operation-scope=members --cluster=member1 + + # Get the documentation of resources in a different format in Karmada control plane + %[1]s explain clusterpropagationpolicies --output=plaintext-openapiv2`) + plaintextTemplateName = "plaintext" +) +
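Given the Validate contract implemented further below (members scope demands an explicit --cluster), a hedged test sketch; the test name and the discard-streams helper are illustrative, not part of this patch:

package explain

import (
	"testing"

	"k8s.io/cli-runtime/pkg/genericiooptions"
	kubectlexplain "k8s.io/kubectl/pkg/cmd/explain"

	"github.com/karmada-io/karmada/pkg/karmadactl/options"
)

// Validate should reject members scope when no --cluster is given.
func TestValidateRequiresClusterForMembersScope(t *testing.T) {
	streams := genericiooptions.NewTestIOStreamsDiscard()
	o := CommandExplainOptions{
		ExplainOptions: kubectlexplain.NewExplainOptions("karmadactl", streams),
		OperationScope: options.Members,
	}
	if err := o.Validate(); err == nil {
		t.Fatal("expected error: --operation-scope=members requires --cluster")
	}
}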
+// NewCmdExplain returns a new explain command. +func NewCmdExplain(f util.Factory, parentCommand string, streams genericiooptions.IOStreams) *cobra.Command { + var o CommandExplainOptions + o.ExplainOptions = kubectlexplain.NewExplainOptions(parentCommand, streams) + + cmd := &cobra.Command{ + Use: "explain TYPE [--recursive=FALSE|TRUE] [--api-version=api-version-group] [--output=plaintext|plaintext-openapiv2]", + DisableFlagsInUseLine: true, + Short: "Get documentation for a resource", + Long: fmt.Sprintf(explainLong, parentCommand), + Example: fmt.Sprintf(explainExamples, parentCommand), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + Annotations: map[string]string{ + util.TagCommandGroup: util.GroupBasic, + }, + } + + flags := cmd.Flags() + o.OperationScope = options.KarmadaControlPlane + options.AddKubeConfigFlags(flags) + options.AddNamespaceFlag(flags) + flags.VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada and members. Defaults to karmada.") + flags.BoolVar(&o.Recursive, "recursive", o.Recursive, "When true, print the name of all the fields recursively. Otherwise, print the available fields with their description.") + flags.StringVar(&o.APIVersion, "api-version", o.APIVersion, "Use given api-version (group/version) of the resource.") + + flags.StringVar(&o.OutputFormat, "output", plaintextTemplateName, "Format in which to render the schema. Valid values are: (plaintext, plaintext-openapiv2).") + flags.StringVar(&o.Cluster, "cluster", "", "Used to specify a target member cluster and only takes effect when the command's operation scope is members, for example: --operation-scope=members --cluster=member1") + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(cmd, options.KarmadaControlPlane, options.Members) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) + return cmd +} + +// CommandExplainOptions contains the input to the explain command.
+type CommandExplainOptions struct { + // flags specific to explain + *kubectlexplain.ExplainOptions + Cluster string + OperationScope options.OperationScope +} + +// Complete ensures that options are valid and marshals them if necessary +func (o *CommandExplainOptions) Complete(f util.Factory, cmd *cobra.Command, args []string) error { + var explainFactory cmdutil.Factory = f + if o.OperationScope == options.Members && len(o.Cluster) != 0 { + memberFactory, err := f.FactoryForMemberCluster(o.Cluster) + if err != nil { + return err + } + explainFactory = memberFactory + } + + return o.ExplainOptions.Complete(explainFactory, cmd, args) +} + +// Validate checks that the provided explain options are specified +func (o *CommandExplainOptions) Validate() error { + err := options.VerifyOperationScopeFlags(o.OperationScope, options.KarmadaControlPlane, options.Members) + if err != nil { + return err + } + if o.OperationScope == options.Members && len(o.Cluster) == 0 { + return fmt.Errorf("must specify a member cluster") + } + return o.ExplainOptions.Validate() +} + +// Run executes the appropriate steps to print a model's documentation +func (o *CommandExplainOptions) Run() error { + return o.ExplainOptions.Run() +} diff --git a/pkg/karmadactl/get/get.go b/pkg/karmadactl/get/get.go index edf66affd92d..5f8156220184 100644 --- a/pkg/karmadactl/get/get.go +++ b/pkg/karmadactl/get/get.go @@ -130,13 +130,13 @@ func NewCmdGet(f util.Factory, parentCommand string, streams genericiooptions.IO DisableFlagsInUseLine: true, Example: fmt.Sprintf(getExample, parentCommand), RunE: func(cmd *cobra.Command, args []string) error { - if err := o.Complete(f); err != nil { + if err := o.Complete(f, cmd); err != nil { return err } if err := o.Validate(cmd); err != nil { return err } - if err := o.Run(f, cmd, args); err != nil { + if err := o.Run(f, args); err != nil { return err } return nil @@ -149,9 +149,9 @@ func NewCmdGet(f util.Factory, parentCommand string, streams genericiooptions.IO o.PrintFlags.AddFlags(cmd) flags := cmd.Flags() options.AddKubeConfigFlags(flags) + options.AddNamespaceFlag(flags) o.OperationScope = options.KarmadaControlPlane - flags.Var(&o.OperationScope, "operation-scope", "Used to control the operation scope of the command. The optional values are karmada, members, and all. Defaults to karmada.") - flags.StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + flags.VarP(&o.OperationScope, "operation-scope", "s", "Used to control the operation scope of the command. The optional values are karmada, members, and all. Defaults to karmada.") flags.StringVarP(&o.LabelSelector, "labels", "l", "", "-l=label or -l label") flags.StringSliceVarP(&o.Clusters, "clusters", "C", []string{}, "Used to specify target member clusters and only takes effect when the command's operation scope is members or all, for example: --operation-scope=all --clusters=member1,member2") flags.BoolVarP(&o.AllNamespaces, "all-namespaces", "A", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") @@ -167,7 +167,7 @@ func NewCmdGet(f util.Factory, parentCommand string, streams genericiooptions.IO type CommandGetOptions struct { Clusters []string OperationScope options.OperationScope - targetMemberClusters []string + TargetMemberClusters []string PrintFlags *get.PrintFlags ToPrinter func(*meta.RESTMapping, *bool, bool, bool) (printers.ResourcePrinterFunc, error) @@ -198,7 +198,7 @@ type CommandGetOptions struct { genericiooptions.IOStreams - karmadaClient karmadaclientset.Interface + KarmadaClient karmadaclientset.Interface } // NewCommandGetOptions returns a CommandGetOptions with default chunk size 500. @@ -212,9 +212,7 @@ func NewCommandGetOptions(streams genericiooptions.IOStreams) *CommandGetOptions } // Complete takes the command arguments and infers any remaining options. -func (g *CommandGetOptions) Complete(f util.Factory) error { - newScheme := gclient.NewSchema() - +func (g *CommandGetOptions) Complete(f util.Factory, cmd *cobra.Command) error { err := g.handleNamespaceScopeFlags(f) if err != nil { return err @@ -225,12 +223,54 @@ func (g *CommandGetOptions) Complete(f util.Factory) error { templateArg = *g.PrintFlags.TemplateFlags.TemplateArgument } + outputOption := cmd.Flags().Lookup("output").Value.String() + if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") { + g.ServerPrint = false + } + // human readable printers have special conversion rules, so we determine if we're using one. if (len(*g.PrintFlags.OutputFormat) == 0 && len(templateArg) == 0) || *g.PrintFlags.OutputFormat == "wide" { g.IsHumanReadablePrinter = true } - g.ToPrinter = func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) { + g.ToPrinter = g.getResourcePrinter() + karmadaClient, err := f.KarmadaClientSet() + if err != nil { + return err + } + g.KarmadaClient = karmadaClient + return g.HandleClusterScopeFlags() +} + +// Validate checks the set of flags provided by the user. 
+func (g *CommandGetOptions) Validate(cmd *cobra.Command) error { + if cmdutil.GetFlagBool(cmd, "show-labels") { + outputOption := cmd.Flags().Lookup("output").Value.String() + if outputOption != "" && outputOption != "wide" { + return fmt.Errorf("--show-labels option cannot be used with %s printer", outputOption) + } + } + if g.OutputWatchEvents && !(g.Watch || g.WatchOnly) { + return fmt.Errorf("--output-watch-events option can only be used with --watch or --watch-only") + } + + if err := options.VerifyOperationScopeFlags(g.OperationScope); err != nil { + return err + } + + if options.ContainMembersScope(g.OperationScope) && len(g.Clusters) > 0 { + clusters, err := g.KarmadaClient.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + return util.VerifyClustersExist(g.Clusters, clusters) + } + return nil +} + +func (g *CommandGetOptions) getResourcePrinter() func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) { + newScheme := gclient.NewSchema() + return func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) { // make a new copy of current flags / opts before mutating printFlags := g.PrintFlags.Copy() @@ -263,51 +303,20 @@ func (g *CommandGetOptions) Complete(f util.Factory) error { return printer.PrintObj, nil } - karmadaClient, err := f.KarmadaClientSet() - if err != nil { - return err - } - g.karmadaClient = karmadaClient - return g.handleClusterScopeFlags() -} - -// Validate checks the set of flags provided by the user. -func (g *CommandGetOptions) Validate(cmd *cobra.Command) error { - if cmdutil.GetFlagBool(cmd, "show-labels") { - outputOption := cmd.Flags().Lookup("output").Value.String() - if outputOption != "" && outputOption != "wide" { - return fmt.Errorf("--show-labels option cannot be used with %s printer", outputOption) - } - } - if g.OutputWatchEvents && !(g.Watch || g.WatchOnly) { - return fmt.Errorf("--output-watch-events option can only be used with --watch or --watch-only") - } - - if err := options.VerifyOperationScopeFlags(g.OperationScope); err != nil { - return err - } - - if options.ContainMembersScope(g.OperationScope) && len(g.Clusters) > 0 { - clusters, err := g.karmadaClient.ClusterV1alpha1().Clusters().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return err - } - return util.VerifyClustersExist(g.Clusters, clusters) - } - return nil } -func (g *CommandGetOptions) handleClusterScopeFlags() error { +// HandleClusterScopeFlags used to handle flags related to cluster scope. +func (g *CommandGetOptions) HandleClusterScopeFlags() error { var err error switch g.OperationScope { case options.KarmadaControlPlane: - g.targetMemberClusters = []string{} + g.TargetMemberClusters = []string{} case options.Members, options.All: if len(g.Clusters) == 0 { - g.targetMemberClusters, err = LoadRegisteredClusters(g.karmadaClient) + g.TargetMemberClusters, err = LoadRegisteredClusters(g.KarmadaClient) return err } - g.targetMemberClusters = g.Clusters + g.TargetMemberClusters = g.Clusters return nil } return nil @@ -339,7 +348,7 @@ type WatchObj struct { } // Run performs the get operation. 
-func (g *CommandGetOptions) Run(f util.Factory, cmd *cobra.Command, args []string) error { +func (g *CommandGetOptions) Run(f util.Factory, args []string) error { mux := sync.Mutex{} var wg sync.WaitGroup @@ -347,24 +356,19 @@ func (g *CommandGetOptions) Run(f util.Factory, cmd *cobra.Command, args []strin var watchObjs []WatchObj var allErrs []error - outputOption := cmd.Flags().Lookup("output").Value.String() - if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") { - g.ServerPrint = false - } - if options.ContainKarmadaScope(g.OperationScope) { g.getObjInfo(&mux, f, "Karmada", true, &objs, &watchObjs, &allErrs, args) } - if len(g.targetMemberClusters) != 0 { - wg.Add(len(g.targetMemberClusters)) - for idx := range g.targetMemberClusters { - memberFactory, err := f.FactoryForMemberCluster(g.targetMemberClusters[idx]) + if len(g.TargetMemberClusters) != 0 { + wg.Add(len(g.TargetMemberClusters)) + for idx := range g.TargetMemberClusters { + memberFactory, err := f.FactoryForMemberCluster(g.TargetMemberClusters[idx]) if err != nil { return err } go func() { - g.getObjInfo(&mux, memberFactory, g.targetMemberClusters[idx], false, &objs, &watchObjs, &allErrs, args) + g.getObjInfo(&mux, memberFactory, g.TargetMemberClusters[idx], false, &objs, &watchObjs, &allErrs, args) wg.Done() }() } @@ -476,7 +480,7 @@ func (g *CommandGetOptions) printIfNotFindResource(written int, allErrs *[]error if written != 0 || g.IgnoreNotFound || len(*allErrs) != 0 { return } - if !options.ContainKarmadaScope(g.OperationScope) && len(g.targetMemberClusters) == 0 { + if !options.ContainKarmadaScope(g.OperationScope) && len(g.TargetMemberClusters) == 0 { fmt.Fprintln(g.ErrOut, "No member Clusters found in Karmada control plane") return } diff --git a/pkg/karmadactl/interpret/interpret.go b/pkg/karmadactl/interpret/interpret.go index 047727177e16..6122c24b33aa 100644 --- a/pkg/karmadactl/interpret/interpret.go +++ b/pkg/karmadactl/interpret/interpret.go @@ -34,6 +34,7 @@ import ( workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/karmadactl/util/genericresource" "github.com/karmada-io/karmada/pkg/util/gclient" "github.com/karmada-io/karmada/pkg/util/helper" @@ -44,13 +45,13 @@ var ( interpretLong = templates.LongDesc(` Validate, test and edit interpreter customization before applying it to the control plane. - 1. Validate the ResourceInterpreterCustomization configuration as per API schema + 1. Validate the ResourceInterpreterCustomization configuration as per API schema and try to load the scripts for syntax check. - 2. Run the rules locally and test if the result is expected. Similar to the dry run. + 2. Run the rules locally and test if the result is expected. Similar to the dry run. 3. Edit customization. Similar to the kubectl edit. 
-`) + `) interpretExample = templates.Examples(` # Check the customizations in file @@ -82,7 +83,7 @@ var ( # Edit customization %[1]s interpret -f customization.yml --edit -`) + `) ) const ( @@ -131,6 +132,7 @@ func NewCmdInterpret(f util.Factory, parentCommand string, streams genericioopti cmdutil.AddJsonFilenameFlag(flags, &o.FilenameOptions.Filenames, "Filename, directory, or URL to files containing the customizations") flags.BoolVarP(&o.FilenameOptions.Recursive, "recursive", "R", false, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } diff --git a/pkg/karmadactl/join/join.go b/pkg/karmadactl/join/join.go index fd773025f4f1..d3ce88be906e 100644 --- a/pkg/karmadactl/join/join.go +++ b/pkg/karmadactl/join/join.go @@ -33,6 +33,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" "github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/util" ) @@ -77,6 +78,7 @@ func NewCmdJoin(f cmdutil.Factory, parentCommand string) *cobra.Command { opts.AddFlags(flags) options.AddKubeConfigFlags(flags) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } diff --git a/pkg/karmadactl/karmadactl.go b/pkg/karmadactl/karmadactl.go index 87ab2573f075..c0dff7ba96e7 100644 --- a/pkg/karmadactl/karmadactl.go +++ b/pkg/karmadactl/karmadactl.go @@ -29,24 +29,36 @@ import ( "k8s.io/kubectl/pkg/util/templates" "github.com/karmada-io/karmada/pkg/karmadactl/addons" + "github.com/karmada-io/karmada/pkg/karmadactl/annotate" + "github.com/karmada-io/karmada/pkg/karmadactl/apiresources" "github.com/karmada-io/karmada/pkg/karmadactl/apply" + "github.com/karmada-io/karmada/pkg/karmadactl/attach" "github.com/karmada-io/karmada/pkg/karmadactl/cmdinit" + "github.com/karmada-io/karmada/pkg/karmadactl/completion" "github.com/karmada-io/karmada/pkg/karmadactl/cordon" + "github.com/karmada-io/karmada/pkg/karmadactl/create" "github.com/karmada-io/karmada/pkg/karmadactl/deinit" + karmadactldelete "github.com/karmada-io/karmada/pkg/karmadactl/delete" "github.com/karmada-io/karmada/pkg/karmadactl/describe" + "github.com/karmada-io/karmada/pkg/karmadactl/edit" "github.com/karmada-io/karmada/pkg/karmadactl/exec" + "github.com/karmada-io/karmada/pkg/karmadactl/explain" "github.com/karmada-io/karmada/pkg/karmadactl/get" "github.com/karmada-io/karmada/pkg/karmadactl/interpret" "github.com/karmada-io/karmada/pkg/karmadactl/join" + "github.com/karmada-io/karmada/pkg/karmadactl/label" "github.com/karmada-io/karmada/pkg/karmadactl/logs" "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/patch" "github.com/karmada-io/karmada/pkg/karmadactl/promote" "github.com/karmada-io/karmada/pkg/karmadactl/register" "github.com/karmada-io/karmada/pkg/karmadactl/taint" "github.com/karmada-io/karmada/pkg/karmadactl/token" "github.com/karmada-io/karmada/pkg/karmadactl/top" "github.com/karmada-io/karmada/pkg/karmadactl/unjoin" + "github.com/karmada-io/karmada/pkg/karmadactl/unregister" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/version/sharedcommand" ) @@ -82,11 +94,24 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand 
string) *cobra.Command { _ = flag.CommandLine.Parse(nil) f := util.NewFactory(options.DefaultConfigFlags) ioStreams := genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr} + + // Avoid import cycle by setting ValidArgsFunction here instead of in NewCmdGet() + getCmd := get.NewCmdGet(f, parentCommand, ioStreams) + getCmd.ValidArgsFunction = utilcomp.ResourceTypeAndNameCompletionFunc(f) + utilcomp.RegisterCompletionFuncForClustersFlag(getCmd) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(getCmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(getCmd, f) + utilcomp.RegisterCompletionFuncForOperationScopeFlag(getCmd) + groups := templates.CommandGroups{ { Message: "Basic Commands:", Commands: []*cobra.Command{ - get.NewCmdGet(f, parentCommand, ioStreams), + explain.NewCmdExplain(f, parentCommand, ioStreams), + getCmd, + create.NewCmdCreate(f, parentCommand, ioStreams), + karmadactldelete.NewCmdDelete(f, parentCommand, ioStreams), + edit.NewCmdEdit(f, parentCommand, ioStreams), }, }, { @@ -99,6 +124,7 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand string) *cobra.Command { unjoin.NewCmdUnjoin(f, parentCommand), token.NewCmdToken(f, parentCommand, ioStreams), register.NewCmdRegister(parentCommand), + unregister.NewCmdUnregister(parentCommand), }, }, { @@ -112,6 +138,7 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand string) *cobra.Command { { Message: "Troubleshooting and Debugging Commands:", Commands: []*cobra.Command{ + attach.NewCmdAttach(f, parentCommand, ioStreams), logs.NewCmdLogs(f, parentCommand, ioStreams), exec.NewCmdExec(f, parentCommand, ioStreams), describe.NewCmdDescribe(f, parentCommand, ioStreams), @@ -124,6 +151,22 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand string) *cobra.Command { apply.NewCmdApply(f, parentCommand, ioStreams), promote.NewCmdPromote(f, parentCommand), top.NewCmdTop(f, parentCommand, ioStreams), + patch.NewCmdPatch(f, parentCommand, ioStreams), + }, + }, + { + Message: "Settings Commands:", + Commands: []*cobra.Command{ + label.NewCmdLabel(f, parentCommand, ioStreams), + annotate.NewCmdAnnotate(f, parentCommand, ioStreams), + completion.NewCmdCompletion(parentCommand, ioStreams.Out, ""), + }, + }, + { + Message: "Other Commands:", + Commands: []*cobra.Command{ + apiresources.NewCmdAPIResources(f, parentCommand, ioStreams), + apiresources.NewCmdAPIVersions(f, parentCommand, ioStreams), }, }, } @@ -136,6 +179,8 @@ func NewKarmadaCtlCommand(cmdUse, parentCommand string) *cobra.Command { templates.ActsAsRootCommand(rootCmd, filters, groups...) + utilcomp.SetFactoryForCompletion(f) + return rootCmd } diff --git a/pkg/karmadactl/label/label.go b/pkg/karmadactl/label/label.go new file mode 100644 index 000000000000..2be1cec696eb --- /dev/null +++ b/pkg/karmadactl/label/label.go @@ -0,0 +1,64 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package label + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectllabel "k8s.io/kubectl/pkg/cmd/label" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + labelExample = templates.Examples(` + # Update deployment 'foo' with the label 'resourcetemplate.karmada.io/deletion-protected' and the value 'Always' + %[1]s label deployment foo resourcetemplate.karmada.io/deletion-protected=Always + + # Update deployment 'foo' with the label 'resourcetemplate.karmada.io/deletion-protected' and the value '', overwriting any existing value + %[1]s label --overwrite deployment foo resourcetemplate.karmada.io/deletion-protected= + + # Update all propagationpolicies in the namespace + %[1]s label pp --all resourcetemplate.karmada.io/deletion-protected= + + # Update a deployment identified by the type and name in "deployment.json" + %[1]s label -f deployment.json resourcetemplate.karmada.io/deletion-protected= + + # Update deployment 'foo' only if the resource is unchanged from version 1 + %[1]s label deployment foo resourcetemplate.karmada.io/deletion-protected=Always --resource-version=1 + + # Update deployment 'foo' by removing the label 'resourcetemplate.karmada.io/deletion-protected' if it exists + # Does not require the --overwrite flag + %[1]s label deployment foo resourcetemplate.karmada.io/deletion-protected-`) +) + +// NewCmdLabel returns new initialized instance of label sub command +func NewCmdLabel(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := kubectllabel.NewCmdLabel(f, ioStreams) + cmd.Example = fmt.Sprintf(labelExample, parentCommand) + cmd.Annotations = map[string]string{ + util.TagCommandGroup: util.GroupSettingsCommands, + } + options.AddKubeConfigFlags(cmd.Flags()) + options.AddNamespaceFlag(cmd.Flags()) + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + return cmd +}
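All of these registrations reduce to two cobra hooks: ValidArgsFunction for positional arguments and RegisterFlagCompletionFunc for flag values. A self-contained sketch with hard-coded candidates (member1/member2 stand in for a real cluster lookup):

package main

import (
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo [CLUSTER]",
		// Completes the positional CLUSTER argument.
		ValidArgsFunction: func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
			return []string{"member1", "member2"}, cobra.ShellCompDirectiveNoFileComp
		},
		Run: func(*cobra.Command, []string) {},
	}
	cmd.Flags().String("cluster", "", "target member cluster")
	// Completes values for --cluster the same way.
	_ = cmd.RegisterFlagCompletionFunc("cluster", func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
		return []string{"member1", "member2"}, cobra.ShellCompDirectiveNoFileComp
	})
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}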
diff --git a/pkg/karmadactl/logs/logs.go b/pkg/karmadactl/logs/logs.go index 68ba700fb30b..c6725df29d14 100644 --- a/pkg/karmadactl/logs/logs.go +++ b/pkg/karmadactl/logs/logs.go @@ -27,6 +27,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) const ( @@ -79,6 +80,7 @@ func NewCmdLogs(f util.Factory, parentCommand string, streams genericiooptions.I SilenceUsage: true, DisableFlagsInUseLine: true, Example: fmt.Sprintf(logsExample, parentCommand), + ValidArgsFunction: utilcomp.PodResourceNameAndContainerCompletionFunc(f), RunE: func(cmd *cobra.Command, args []string) error { if err := o.Complete(cmd, args, f); err != nil { return err @@ -98,10 +100,13 @@ func NewCmdLogs(f util.Factory, parentCommand string, streams genericiooptions.I flags := cmd.Flags() options.AddKubeConfigFlags(flags) - flags.StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + options.AddNamespaceFlag(flags) flags.StringVarP(&o.Cluster, "cluster", "C", "", "Specify a member cluster") o.KubectlLogsOptions.AddFlags(cmd) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) return cmd } diff --git a/pkg/karmadactl/options/global.go b/pkg/karmadactl/options/global.go index 6cdf5c9443f9..8cc65b95bd91 100644 --- a/pkg/karmadactl/options/global.go +++ b/pkg/karmadactl/options/global.go @@ -49,6 +49,11 @@ func AddKubeConfigFlags(flags *pflag.FlagSet) { flags.StringVar(DefaultConfigFlags.Context, "karmada-context", *DefaultConfigFlags.Context, "The name of the kubeconfig context to use") } +// AddNamespaceFlag adds the namespace flag to the specified FlagSet. +func AddNamespaceFlag(flags *pflag.FlagSet) { + flags.StringVarP(DefaultConfigFlags.Namespace, "namespace", "n", *DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request.") +} + // OperationScope defines the operation scope of a command. type OperationScope string @@ -57,7 +62,7 @@ func (o *OperationScope) String() string { return string(*o) } -// Set vaule to OperationScope +// Set value to OperationScope func (o *OperationScope) Set(s string) error { switch s { case "": diff --git a/pkg/karmadactl/patch/patch.go b/pkg/karmadactl/patch/patch.go new file mode 100644 index 000000000000..a38c55a7f5f7 --- /dev/null +++ b/pkg/karmadactl/patch/patch.go @@ -0,0 +1,60 @@ +/* +Copyright 2024 The Karmada Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "fmt" + + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" + kubectlpatch "k8s.io/kubectl/pkg/cmd/patch" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" +) + +var ( + patchExample = templates.Examples(` + # Partially update a deployment using a strategic merge patch, specifying the patch as JSON + %[1]s patch deployment nginx-deployment -p '{"spec":{"replicas":2}}' + + # Partially update a deployment using a strategic merge patch, specifying the patch as YAML + %[1]s patch deployment nginx-deployment -p $'spec:\n replicas: 2' + + # Partially update a deployment identified by the type and name specified in "deployment.json" using strategic merge patch + %[1]s patch -f deployment.json -p '{"spec":{"replicas":2}}' + + # Update a propagationpolicy's conflictResolution using a JSON patch with positional arrays + %[1]s patch pp nginx-propagation --type='json' -p='[{"op": "replace", "path": "/spec/conflictResolution", "value":"Overwrite"}]' + + # Update a deployment's replicas through the 'scale' subresource using a merge patch + %[1]s patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}'`) +) + +// NewCmdPatch returns new initialized instance of patch sub command +func NewCmdPatch(f util.Factory, parentCommand string, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := kubectlpatch.NewCmdPatch(f, ioStreams) + cmd.Example = fmt.Sprintf(patchExample, parentCommand) + cmd.Annotations = map[string]string{ + util.TagCommandGroup: util.GroupAdvancedCommands, + } + options.AddKubeConfigFlags(cmd.Flags()) + options.AddNamespaceFlag(cmd.Flags()) + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + return cmd +}
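The -p '{"spec":{"replicas":2}}' examples are strategic merge patches, so untouched sibling fields survive the merge. The semantics can be reproduced locally with apimachinery (a sketch; the toy JSON documents are illustrative):

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	original := []byte(`{"spec":{"replicas":1,"paused":true}}`)
	patch := []byte(`{"spec":{"replicas":2}}`)
	// The Deployment schema tells the merger which fields merge vs. replace;
	// the untouched field (paused) survives, replicas is overwritten.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, appsv1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(merged)) // {"spec":{"paused":true,"replicas":2}}
}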
diff --git a/pkg/karmadactl/promote/promote.go b/pkg/karmadactl/promote/promote.go index 230b1a17c672..2cb99167cce1 100644 --- a/pkg/karmadactl/promote/promote.go +++ b/pkg/karmadactl/promote/promote.go @@ -38,6 +38,7 @@ import ( "k8s.io/klog/v2" cmdutil "k8s.io/kubectl/pkg/cmd/util" "k8s.io/kubectl/pkg/util/templates" + "k8s.io/utils/ptr" configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" @@ -46,6 +47,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/get" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/resourceinterpreter/customized/declarative" "github.com/karmada-io/karmada/pkg/resourceinterpreter/customized/webhook" "github.com/karmada-io/karmada/pkg/resourceinterpreter/customized/webhook/request" @@ -103,6 +105,7 @@ func NewCmdPromote(f util.Factory, parentCommand string) *cobra.Command { Example: fmt.Sprintf(promoteExample, parentCommand), SilenceUsage: true, DisableFlagsInUseLine: true, + ValidArgsFunction: utilcomp.ResourceTypeAndNameCompletionFunc(f), RunE: func(_ *cobra.Command, args []string) error { if err := opts.Complete(f, args); err != nil { return err @@ -123,7 +126,11 @@ func NewCmdPromote(f util.Factory, parentCommand string) *cobra.Command { flag := cmd.Flags() opts.AddFlags(flag) options.AddKubeConfigFlags(flag) + options.AddNamespaceFlag(flag) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForClusterFlag(cmd) return cmd } @@ -176,7 +183,6 @@ func (o *CommandPromoteOption) AddFlags(flags *pflag.FlagSet) { "The name of the PropagationPolicy(or ClusterPropagationPolicy) that is automatically created after promotion. If not specified, the name will be the resource name with a hash suffix that is generated by resource metadata.") flags.StringVarP(&o.OutputFormat, "output", "o", "", "Output format. One of: json|yaml") - flags.StringVarP(&o.Namespace, "namespace", "n", o.Namespace, "If present, the namespace scope for this CLI request") flags.StringVarP(&o.Cluster, "cluster", "C", "", "the name of legacy cluster (eg -C=member1)") flags.StringVar(&o.ClusterContext, "cluster-context", "", "Context name of legacy cluster in kubeconfig. Only works when there are multiple contexts in the kubeconfig.") @@ -213,11 +219,9 @@ func (o *CommandPromoteOption) Complete(f util.Factory, args []string) error { } } - if o.Namespace == "" { - o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() - if err != nil { - return fmt.Errorf("failed to get namespace from Factory. error: %w", err) - } + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return fmt.Errorf("failed to get namespace from Factory. error: %w", err) } // If '--cluster-context' not specified, take the cluster name as the context.
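Complete now always resolves the namespace from the kubeconfig context rather than honoring a promote-specific -n flag; the resolution it delegates to looks roughly like this with client-go (a sketch assuming a kubeconfig at the default location):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Same resolution chain ToRawKubeConfigLoader().Namespace() uses:
	// default loading rules plus (empty) overrides.
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	cfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(rules, &clientcmd.ConfigOverrides{})
	ns, explicit, err := cfg.Namespace()
	if err != nil {
		panic(err)
	}
	fmt.Printf("namespace=%s explicitlySet=%v\n", ns, explicit)
}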
@@ -550,12 +554,14 @@ func (o *CommandPromoteOption) promote(controlPlaneRestConfig *rest.Config, obj if err != nil { return fmt.Errorf("failed to create resource %q(%s) in control plane: %v", gvr, o.name, err) } + fmt.Printf("ResourceTemplate (%s) is created successfully\n", o.name) if o.AutoCreatePolicy { - err = o.createClusterPropagationPolicy(karmadaClient, gvr, isDep) + policyName, err := o.createClusterPropagationPolicy(karmadaClient, gvr, isDep) if err != nil { return err } + fmt.Printf("ClusterPropagationPolicy %s is created successfully\n", policyName) } fmt.Printf("Resource %q(%s) is promoted successfully\n", gvr, o.name) @@ -575,12 +581,14 @@ func (o *CommandPromoteOption) promote(controlPlaneRestConfig *rest.Config, obj if err != nil { return fmt.Errorf("failed to create resource %q(%s/%s) in control plane: %v", gvr, o.Namespace, o.name, err) } + fmt.Printf("ResourceTemplate (%s/%s) is created successfully\n", o.Namespace, o.name) if o.AutoCreatePolicy { - err = o.createPropagationPolicy(karmadaClient, gvr, isDep) + policyName, err := o.createPropagationPolicy(karmadaClient, gvr, isDep) if err != nil { return err } + fmt.Printf("PropagationPolicy (%s/%s) is created successfully\n", o.Namespace, policyName) } fmt.Printf("Resource %q(%s/%s) is promoted successfully\n", gvr, o.Namespace, o.name) @@ -655,7 +663,7 @@ func (o *CommandPromoteOption) printObjectAndPolicy(obj *unstructured.Unstructur } // createPropagationPolicy create PropagationPolicy in karmada control plane -func (o *CommandPromoteOption) createPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) error { +func (o *CommandPromoteOption) createPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) (string, error) { var policyName string if o.PolicyName == "" { policyName = names.GeneratePolicyName(o.Namespace, o.name, o.gvk.String()) @@ -668,18 +676,18 @@ pp := buildPropagationPolicy(o.name, policyName, o.Namespace, o.Cluster, gvr, o.gvk, isDep) _, err = karmadaClient.PolicyV1alpha1().PropagationPolicies(o.Namespace).Create(context.TODO(), pp, metav1.CreateOptions{}) - return err + return policyName, err } if err != nil { - return fmt.Errorf("failed to get PropagationPolicy(%s/%s) in control plane: %v", o.Namespace, policyName, err) + return policyName, fmt.Errorf("failed to get PropagationPolicy(%s/%s) in control plane: %v", o.Namespace, policyName, err) } // PropagationPolicy already exists, not to create it - return fmt.Errorf("the PropagationPolicy(%s/%s) already exist, please edit it to propagate resource", o.Namespace, policyName) + return policyName, fmt.Errorf("the PropagationPolicy(%s/%s) already exist, please edit it to propagate resource", o.Namespace, policyName) } // createClusterPropagationPolicy create ClusterPropagationPolicy in karmada control plane -func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) error { +func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *karmadaclientset.Clientset, gvr schema.GroupVersionResource, isDep bool) (string, error) { var policyName string if o.PolicyName == "" { policyName = names.GeneratePolicyName("", o.name, o.gvk.String()) @@ -692,14 +700,14 @@ func (o *CommandPromoteOption) createClusterPropagationPolicy(karmadaClient *kar cpp =
buildClusterPropagationPolicy(o.name, policyName, o.Cluster, gvr, o.gvk, isDep) _, err = karmadaClient.PolicyV1alpha1().ClusterPropagationPolicies().Create(context.TODO(), cpp, metav1.CreateOptions{}) - return err + return policyName, err } if err != nil { - return fmt.Errorf("failed to get ClusterPropagationPolicy(%s) in control plane: %v", policyName, err) + return policyName, fmt.Errorf("failed to get ClusterPropagationPolicy(%s) in control plane: %v", policyName, err) } // ClusterPropagationPolicy already exists, not to create it - return fmt.Errorf("the ClusterPropagationPolicy(%s) already exist, please edit it to propagate resource", policyName) + return policyName, fmt.Errorf("the ClusterPropagationPolicy(%s) already exist, please edit it to propagate resource", policyName) } // preprocessResource delete redundant fields to convert resource as template @@ -747,6 +755,8 @@ func buildPropagationPolicy(resourceName, policyName, namespace, cluster string, ClusterNames: []string{cluster}, }, }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + PreserveResourcesOnDeletion: ptr.To[bool](true), }, } return pp @@ -772,6 +782,8 @@ func buildClusterPropagationPolicy(resourceName, policyName, cluster string, gvr ClusterNames: []string{cluster}, }, }, + ConflictResolution: policyv1alpha1.ConflictOverwrite, + PreserveResourcesOnDeletion: ptr.To[bool](true), }, } return cpp diff --git a/pkg/karmadactl/register/register.go b/pkg/karmadactl/register/register.go index 852c2150d1b8..c07db7cc78f7 100644 --- a/pkg/karmadactl/register/register.go +++ b/pkg/karmadactl/register/register.go @@ -51,6 +51,7 @@ import ( "github.com/karmada-io/karmada/pkg/apis/cluster/validation" karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" + addonutils "github.com/karmada-io/karmada/pkg/karmadactl/addons/utils" "github.com/karmada-io/karmada/pkg/karmadactl/options" cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" "github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient" @@ -398,7 +399,7 @@ func (o *CommandRegisterOption) Run(parentCommand string) error { return err } - if err := cmdutil.WaitForDeploymentRollout(o.memberClusterClient, KarmadaAgentDeployment, int(o.Timeout)); err != nil { + if err := addonutils.WaitForDeploymentRollout(o.memberClusterClient, KarmadaAgentDeployment, int(o.Timeout)); err != nil { return err } diff --git a/pkg/karmadactl/taint/taint.go b/pkg/karmadactl/taint/taint.go index b050d91d59aa..de150eaa843a 100644 --- a/pkg/karmadactl/taint/taint.go +++ b/pkg/karmadactl/taint/taint.go @@ -37,6 +37,7 @@ import ( "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/scheme" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/util/lifted" ) @@ -84,6 +85,7 @@ func NewCmdTaint(f util.Factory, parentCommand string) *cobra.Command { Example: fmt.Sprintf(taintExample, parentCommand), SilenceUsage: true, DisableFlagsInUseLine: true, + ValidArgsFunction: utilcomp.SpecifiedResourceTypeAndNameCompletionFunc(f, []string{"cluster"}), RunE: func(_ *cobra.Command, args []string) error { if err := opts.Complete(f, args); err != nil { return err @@ -107,6 +109,7 @@ func NewCmdTaint(f util.Factory, parentCommand string) *cobra.Command { flags.BoolVar(&opts.overwrite, "overwrite", opts.overwrite, "If true, allow taints to be overwritten, otherwise reject taint updates that overwrite existing 
taints.") flags.BoolVar(&opts.DryRun, "dry-run", false, "Run the command in dry-run mode, without making any server requests.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } diff --git a/pkg/karmadactl/token/token.go b/pkg/karmadactl/token/token.go index 69f052eaffe8..138358cb79e2 100644 --- a/pkg/karmadactl/token/token.go +++ b/pkg/karmadactl/token/token.go @@ -39,6 +39,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" tokenutil "github.com/karmada-io/karmada/pkg/karmadactl/util/bootstraptoken" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) var ( @@ -135,6 +136,7 @@ func NewCmdTokenCreate(f util.Factory, out io.Writer, tokenOpts *CommandTokenOpt cmd.Flags().StringSliceVar(&tokenOpts.Groups, "groups", tokenutil.DefaultGroups, fmt.Sprintf("Extra groups that this token will authenticate as when used for authentication. Must match %q", bootstrapapi.BootstrapGroupPattern)) cmd.Flags().StringVar(&tokenOpts.Description, "description", tokenOpts.Description, "A human friendly description of how this token is used.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } @@ -158,6 +160,7 @@ func NewCmdTokenList(f util.Factory, out io.Writer, errW io.Writer, tokenOpts *C options.AddKubeConfigFlags(cmd.Flags()) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } @@ -190,6 +193,7 @@ func NewCmdTokenDelete(f util.Factory, out io.Writer, tokenOpts *CommandTokenOpt options.AddKubeConfigFlags(cmd.Flags()) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } diff --git a/pkg/karmadactl/top/top.go b/pkg/karmadactl/top/top.go index 235b81727f7c..1241102b1035 100644 --- a/pkg/karmadactl/top/top.go +++ b/pkg/karmadactl/top/top.go @@ -58,6 +58,9 @@ func NewCmdTop(f util.Factory, parentCommand string, streams genericiooptions.IO Short: "Display resource (CPU/memory) usage of member clusters", Long: topLong, Run: cmdutil.DefaultSubCommandRun(streams.ErrOut), + Annotations: map[string]string{ + util.TagCommandGroup: util.GroupAdvancedCommands, + }, } // create subcommands diff --git a/pkg/karmadactl/top/top_node.go b/pkg/karmadactl/top/top_node.go index f1d71385b0b5..055ff08058d2 100644 --- a/pkg/karmadactl/top/top_node.go +++ b/pkg/karmadactl/top/top_node.go @@ -31,7 +31,6 @@ import ( "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/client-go/kubernetes" cmdutil "k8s.io/kubectl/pkg/cmd/util" - "k8s.io/kubectl/pkg/util/completion" "k8s.io/kubectl/pkg/util/i18n" "k8s.io/kubectl/pkg/util/templates" metricsapi "k8s.io/metrics/pkg/apis/metrics" @@ -40,7 +39,9 @@ import ( autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" + "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) // NodeOptions contains all the options for running the top-node cli command. 
@@ -103,21 +104,27 @@ func NewCmdTopNode(f util.Factory, parentCommand string, o *NodeOptions, streams Short: i18n.T("Display resource (CPU/memory) usage of nodes"), Long: topNodeLong, Example: fmt.Sprintf(topNodeExample, parentCommand), - ValidArgsFunction: completion.ResourceNameCompletionFunc(f, "node"), + ValidArgsFunction: utilcomp.ResourceNameCompletionFunc(f, "node"), Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.RunTopNode(f)) }, Aliases: []string{"nodes", "no"}, + Annotations: map[string]string{ + "parent": "top", // used for completion code to set default operation scope. + }, } cmdutil.AddLabelSelectorFlagVar(cmd, &o.Selector) + options.AddKubeConfigFlags(cmd.Flags()) cmd.Flags().StringVar(&o.SortBy, "sort-by", o.SortBy, "If non-empty, sort nodes list using specified field. The field can be either 'cpu' or 'memory'.") cmd.Flags().StringSliceVar(&o.Clusters, "clusters", []string{}, "Used to specify target member clusters, for example: --clusters=member1,member2") cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers") cmd.Flags().BoolVar(&o.UseProtocolBuffers, "use-protocol-buffers", o.UseProtocolBuffers, "Enables using protocol-buffers to access Metrics API.") cmd.Flags().BoolVar(&o.ShowCapacity, "show-capacity", o.ShowCapacity, "Print node resources based on Capacity instead of Allocatable(default) of the nodes.") + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForClustersFlag(cmd) return cmd } diff --git a/pkg/karmadactl/top/top_pods.go b/pkg/karmadactl/top/top_pods.go index f9501542abce..712fbb2a3cbc 100644 --- a/pkg/karmadactl/top/top_pods.go +++ b/pkg/karmadactl/top/top_pods.go @@ -33,7 +33,6 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" cmdutil "k8s.io/kubectl/pkg/cmd/util" - "k8s.io/kubectl/pkg/util/completion" "k8s.io/kubectl/pkg/util/i18n" "k8s.io/kubectl/pkg/util/templates" metricsapi "k8s.io/metrics/pkg/apis/metrics" @@ -44,6 +43,7 @@ import ( karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" "github.com/karmada-io/karmada/pkg/karmadactl/options" "github.com/karmada-io/karmada/pkg/karmadactl/util" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" ) // PodOptions contains the options to the top command. @@ -84,6 +84,12 @@ var ( # Show metrics for all pods in the default namespace %[1]s top pod + # Show metrics for all pods in the default namespace in member1 cluster + %[1]s top pod --clusters=member1 + + # Show metrics for all pods in the default namespace in member1 and member2 cluster + %[1]s top pod --clusters=member1,member2 + # Show metrics for all pods in the given namespace %[1]s top pod --namespace=NAMESPACE @@ -109,17 +115,20 @@ func NewCmdTopPod(f util.Factory, parentCommand string, o *PodOptions, streams g Short: i18n.T("Display resource (CPU/memory) usage of pods of member clusters"), Long: topPodLong, Example: fmt.Sprintf(topPodExample, parentCommand), - ValidArgsFunction: completion.ResourceNameCompletionFunc(f, "pod"), + ValidArgsFunction: utilcomp.ResourceNameCompletionFunc(f, "pod"), Run: func(cmd *cobra.Command, args []string) { cmdutil.CheckErr(o.Complete(f, cmd, args)) cmdutil.CheckErr(o.Validate()) cmdutil.CheckErr(o.RunTopPod(f)) }, Aliases: []string{"pods", "po"}, + Annotations: map[string]string{ + "parent": "top", // used for completion code to set default operation scope. 
+ }, } cmdutil.AddLabelSelectorFlagVar(cmd, &o.LabelSelector) options.AddKubeConfigFlags(cmd.Flags()) - cmd.Flags().StringVarP(options.DefaultConfigFlags.Namespace, "namespace", "n", *options.DefaultConfigFlags.Namespace, "If present, the namespace scope for this CLI request") + options.AddNamespaceFlag(cmd.Flags()) cmd.Flags().StringSliceVarP(&o.Clusters, "clusters", "C", []string{}, "-C=member1,member2") cmd.Flags().StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") cmd.Flags().StringVar(&o.SortBy, "sort-by", o.SortBy, "If non-empty, sort pods list using specified field. The field can be either 'cpu' or 'memory'.") @@ -128,6 +137,10 @@ func NewCmdTopPod(f util.Factory, parentCommand string, o *PodOptions, streams g cmd.Flags().BoolVar(&o.NoHeaders, "no-headers", o.NoHeaders, "If present, print output without headers.") cmd.Flags().BoolVar(&o.UseProtocolBuffers, "use-protocol-buffers", o.UseProtocolBuffers, "Enables using protocol-buffers to access Metrics API.") cmd.Flags().BoolVar(&o.Sum, "sum", o.Sum, "Print the sum of the resource usage") + + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) + utilcomp.RegisterCompletionFuncForNamespaceFlag(cmd, f) + utilcomp.RegisterCompletionFuncForClustersFlag(cmd) return cmd } diff --git a/pkg/karmadactl/unjoin/unjoin.go b/pkg/karmadactl/unjoin/unjoin.go index 3ca26807b1e4..e1bad4c4029b 100644 --- a/pkg/karmadactl/unjoin/unjoin.go +++ b/pkg/karmadactl/unjoin/unjoin.go @@ -17,15 +17,11 @@ limitations under the License. package unjoin import ( - "context" "fmt" "time" "github.com/spf13/cobra" "github.com/spf13/pflag" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" kubeclient "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/klog/v2" @@ -35,6 +31,7 @@ import ( "github.com/karmada-io/karmada/pkg/karmadactl/options" cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" "github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient" + utilcomp "github.com/karmada-io/karmada/pkg/karmadactl/util/completion" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/names" ) @@ -86,6 +83,7 @@ func NewCmdUnjoin(f cmdutil.Factory, parentCommand string) *cobra.Command { opts.AddFlags(flags) options.AddKubeConfigFlags(flags) + utilcomp.RegisterCompletionFuncForKarmadaContextFlag(cmd) return cmd } @@ -149,7 +147,7 @@ func (j *CommandUnjoinOption) AddFlags(flags *pflag.FlagSet) { flags.StringVar(&j.ClusterKubeConfig, "cluster-kubeconfig", "", "Path of the cluster's kubeconfig.") flags.BoolVar(&j.forceDeletion, "force", false, - "Delete cluster and secret resources even if resources in the cluster targeted for unjoin are not removed successfully.") + "When set, the unjoin command will attempt to clean up resources in the member cluster before deleting the Cluster object. 
If the cleanup fails within the timeout period, the Cluster object will still be deleted, potentially leaving some resources behind in the member cluster.") flags.DurationVar(&j.Wait, "wait", 60*time.Second, "wait for the unjoin command execution process(default 60s), if there is no success after this time, timeout will be returned.") flags.BoolVar(&j.DryRun, "dry-run", false, "Run the command in dry-run mode, without making any server requests.") } @@ -183,9 +181,10 @@ func (j *CommandUnjoinOption) Run(f cmdutil.Factory) error { // RunUnJoinCluster unJoin the cluster from karmada. func (j *CommandUnjoinOption) RunUnJoinCluster(controlPlaneRestConfig, clusterConfig *rest.Config) error { controlPlaneKarmadaClient := karmadaclientset.NewForConfigOrDie(controlPlaneRestConfig) + controlPlaneKubeClient := kubeclient.NewForConfigOrDie(controlPlaneRestConfig) // delete the cluster object in host cluster that associates the unjoining cluster - err := j.deleteClusterObject(controlPlaneKarmadaClient) + err := cmdutil.DeleteClusterObject(controlPlaneKubeClient, controlPlaneKarmadaClient, j.ClusterName, j.Wait, j.DryRun, j.forceDeletion) if err != nil { klog.Errorf("Failed to delete cluster object. cluster name: %s, error: %v", j.ClusterName, err) return err @@ -223,42 +222,6 @@ func (j *CommandUnjoinOption) RunUnJoinCluster(controlPlaneRestConfig, clusterCo return nil } -// deleteClusterObject delete the cluster object in host cluster that associates the unjoining cluster -func (j *CommandUnjoinOption) deleteClusterObject(controlPlaneKarmadaClient *karmadaclientset.Clientset) error { - if j.DryRun { - return nil - } - - err := controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Delete(context.TODO(), j.ClusterName, metav1.DeleteOptions{}) - if apierrors.IsNotFound(err) { - return fmt.Errorf("no cluster object %s found in karmada control Plane", j.ClusterName) - } - if err != nil { - klog.Errorf("Failed to delete cluster object. cluster name: %s, error: %v", j.ClusterName, err) - return err - } - - // make sure the given cluster object has been deleted - err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, j.Wait, false, func(context.Context) (done bool, err error) { - _, err = controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Get(context.TODO(), j.ClusterName, metav1.GetOptions{}) - if apierrors.IsNotFound(err) { - return true, nil - } - if err != nil { - klog.Errorf("Failed to get cluster %s. err: %v", j.ClusterName, err) - return false, err - } - klog.Infof("Waiting for the cluster object %s to be deleted", j.ClusterName) - return false, nil - }) - if err != nil { - klog.Errorf("Failed to delete cluster object. cluster name: %s, error: %v", j.ClusterName, err) - return err - } - - return nil -} - // deleteRBACResources deletes the cluster role, cluster rolebindings from the unjoining cluster. func deleteRBACResources(clusterKubeClient kubeclient.Interface, unjoiningClusterName string, forceDeletion, dryRun bool) error { if dryRun { diff --git a/pkg/karmadactl/unregister/unregister.go b/pkg/karmadactl/unregister/unregister.go new file mode 100644 index 000000000000..90b7ccfaa98d --- /dev/null +++ b/pkg/karmadactl/unregister/unregister.go @@ -0,0 +1,405 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unregister + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/klog/v2" + "k8s.io/kubectl/pkg/util/templates" + + "github.com/karmada-io/karmada/pkg/apis/cluster/validation" + karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" + "github.com/karmada-io/karmada/pkg/karmadactl/options" + "github.com/karmada-io/karmada/pkg/karmadactl/register" + cmdutil "github.com/karmada-io/karmada/pkg/karmadactl/util" + "github.com/karmada-io/karmada/pkg/karmadactl/util/apiclient" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +var ( + unregisterLong = templates.LongDesc(` + Unregister removes a member cluster from Karmada, it will clean up the cluster object in the control plane and Karmada resources in the member cluster.`) + + unregisterExample = templates.Examples(` + # Unregister cluster from karmada control plane + %[1]s unregister CLUSTER_NAME --cluster-kubeconfig= [--cluster-context=] + + # Unregister cluster from karmada control plane with timeout + %[1]s unregister CLUSTER_NAME --cluster-kubeconfig= --wait 2m + + # Unregister cluster from karmada control plane, manually specify the location of the karmada config + %[1]s unregister CLUSTER_NAME --karmada-config= [--karmada-context=] --cluster-kubeconfig= [--cluster-context=] + `) +) + +// NewCmdUnregister defines the `unregister` command that removes registration of a pull mode cluster from control plane. +func NewCmdUnregister(parentCommand string) *cobra.Command { + opts := CommandUnregisterOption{} + + cmd := &cobra.Command{ + Use: "unregister CLUSTER_NAME", + Short: "Remove a pull mode cluster from Karmada control plane", + Long: unregisterLong, + Example: fmt.Sprintf(unregisterExample, parentCommand), + SilenceUsage: true, + DisableFlagsInUseLine: true, + RunE: func(_ *cobra.Command, args []string) error { + if err := opts.Complete(args); err != nil { + return err + } + if err := opts.Validate(args); err != nil { + return err + } + if err := opts.Run(); err != nil { + return err + } + return nil + }, + Annotations: map[string]string{ + cmdutil.TagCommandGroup: cmdutil.GroupClusterRegistration, + }, + } + + flags := cmd.Flags() + opts.AddFlags(flags) + + return cmd +} + +// CommandUnregisterOption holds all command options. +type CommandUnregisterOption struct { + // ClusterName is the cluster's name that we are going to unregister. + ClusterName string + + // KarmadaConfig is the path of config to access karmada-apiserver. + KarmadaConfig string + + // KarmadaContext is the context in KarmadaConfig file to access karmada-apiserver. + KarmadaContext string + + // ClusterKubeConfig is the KUBECONFIG file path to access unregistering member cluster. 
+	ClusterKubeConfig string
+
+	// ClusterContext is the context in ClusterKubeConfig to access unregistering member cluster.
+	ClusterContext string
+
+	// Namespace is the namespace where the karmada-agent component is deployed.
+	Namespace string
+
+	// AgentName is the deployment name of the karmada-agent component.
+	AgentName string
+
+	// ClusterNamespace holds the namespace where the member cluster secrets are stored.
+	ClusterNamespace string
+
+	// Wait is the maximum amount of time to wait for command execution.
+	Wait time.Duration
+
+	// DryRun indicates whether to run the command in dry-run mode, without making any server requests.
+	DryRun bool
+
+	// ControlPlaneClient is the karmada control plane client set.
+	ControlPlaneClient karmadaclientset.Interface
+
+	// ControlPlaneKubeClient is the control plane kube client set.
+	ControlPlaneKubeClient kubeclient.Interface
+
+	// MemberClusterClient is the member cluster client set.
+	MemberClusterClient kubeclient.Interface
+}
+
+// AddFlags adds flags to the specified FlagSet.
+func (j *CommandUnregisterOption) AddFlags(flags *pflag.FlagSet) {
+	flags.StringVar(&j.ClusterKubeConfig, "cluster-kubeconfig", "", "KUBECONFIG file path to access unregistering member cluster, required.")
+	flags.StringVar(&j.ClusterContext, "cluster-context", "", "Context in cluster-kubeconfig to access unregistering member cluster, optional, defaults to current context.")
+	flags.StringVar(&j.KarmadaConfig, "karmada-config", "", "Path of config to access karmada-apiserver, optional, defaults to fetch automatically from member cluster.")
+	flags.StringVar(&j.KarmadaContext, "karmada-context", "", "Context in karmada-config to access karmada-apiserver, optional, defaults to current context.")
+
+	flags.StringVarP(&j.Namespace, "namespace", "n", "karmada-system", "Namespace where the karmada-agent component is deployed.")
+	flags.StringVarP(&j.AgentName, "agent-name", "", register.KarmadaAgentName, "Deployment name of the karmada-agent component.")
+	flags.StringVar(&j.ClusterNamespace, "cluster-namespace", options.DefaultKarmadaClusterNamespace, "Namespace in the control plane where member cluster secrets are stored.")
+	flags.DurationVar(&j.Wait, "wait", 60*time.Second, "wait for the unregister command execution process (default 60s), if there is no success after this time, timeout will be returned.")
+	flags.BoolVar(&j.DryRun, "dry-run", false, "Run the command in dry-run mode, without making any server requests.")
+}
+
+// Complete ensures that options are valid and marshals them if necessary.
+func (j *CommandUnregisterOption) Complete(args []string) error {
+	// Get cluster name from the command args.
+	if len(args) > 0 {
+		j.ClusterName = args[0]
+	}
+	return nil
+}
+
+// Validate ensures that command unregister options are valid.
+func (j *CommandUnregisterOption) Validate(args []string) error {
+	if len(args) > 1 {
+		return fmt.Errorf("only the cluster name is allowed as an argument")
+	}
+	if errMsgs := validation.ValidateClusterName(j.ClusterName); len(errMsgs) != 0 {
+		return fmt.Errorf("invalid cluster name(%s): %s", j.ClusterName, strings.Join(errMsgs, ";"))
+	}
+	if j.ClusterKubeConfig == "" {
+		return fmt.Errorf("--cluster-kubeconfig is required to specify KUBECONFIG file path to access unregistering member cluster")
+	}
+	if j.Wait <= 0 {
+		return fmt.Errorf("--wait %v must be a positive duration, e.g. 1m0s", j.Wait)
+	}
+	return nil
+}
+
+// Run is the implementation of the 'unregister' command.
+func (j *CommandUnregisterOption) Run() error {
+	klog.V(1).Infof("Unregistering cluster.
cluster name: %s", j.ClusterName) + klog.V(1).Infof("Unregistering cluster. karmada-agent deployed in namespace: %s", j.Namespace) + klog.V(1).Infof("Unregistering cluster. member cluster secrets stored in namespace: %s", j.ClusterNamespace) + + // 1. build member cluster client + err := j.buildClusterClientSet() + if err != nil { + return err + } + + // 2. build karmada control plane client + if j.KarmadaConfig != "" { + err = j.buildKarmadaClientSetFromFile() + } else { + err = j.buildKarmadaClientSetFromAgent() + } + if err != nil { + return err + } + + return j.RunUnregisterCluster() +} + +func (j *CommandUnregisterOption) buildClusterClientSet() error { + restConfig, err := apiclient.RestConfig(j.ClusterContext, j.ClusterKubeConfig) + if err != nil { + return fmt.Errorf("failed to read member cluster rest config: %w", err) + } + j.MemberClusterClient, err = apiclient.NewClientSet(restConfig) + if err != nil { + return fmt.Errorf("failed to build member cluster clientset: %w", err) + } + return nil +} + +func (j *CommandUnregisterOption) buildKarmadaClientSetFromFile() error { + karmadaCfg, err := clientcmd.LoadFromFile(j.KarmadaConfig) + if err != nil { + return fmt.Errorf("failed to load karmada config: %w", err) + } + if j.KarmadaContext != "" { + karmadaCfg.CurrentContext = j.KarmadaContext + } + j.ControlPlaneClient, err = register.ToKarmadaClient(karmadaCfg) + if err != nil { + return fmt.Errorf("failed to build karmada control plane clientset: %w", err) + } + j.ControlPlaneKubeClient, err = register.ToClientSet(karmadaCfg) + if err != nil { + return fmt.Errorf("failed to build kube control plane clientset: %w", err) + } + return nil +} + +func (j *CommandUnregisterOption) buildKarmadaClientSetFromAgent() error { + // 1. get karmada-agent deployment + agent, err := j.MemberClusterClient.AppsV1().Deployments(j.Namespace).Get(context.TODO(), j.AgentName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment karmada-agent in member cluster: %w", err) + } + + // 2. get karmada config from karmada-agent deployment + karmadaCfg, err := j.getKarmadaAgentConfig(agent) + if err != nil { + return fmt.Errorf("failed to get karmada config from karmada-agent deployment: %w", err) + } + + // 3. 
get karmada context from karmada-agent deployment
+	const karmadaContextPrefix = "--karmada-context="
+	for _, cmd := range agent.Spec.Template.Spec.Containers[0].Command {
+		if strings.HasPrefix(cmd, karmadaContextPrefix) {
+			karmadaCfg.CurrentContext = cmd[len(karmadaContextPrefix):]
+		}
+	}
+
+	j.ControlPlaneClient, err = register.ToKarmadaClient(karmadaCfg)
+	if err != nil {
+		return fmt.Errorf("failed to build karmada control plane clientset: %w", err)
+	}
+	j.ControlPlaneKubeClient, err = register.ToClientSet(karmadaCfg)
+	if err != nil {
+		return fmt.Errorf("failed to build kube control plane clientset: %w", err)
+	}
+	return nil
+}
+
+func (j *CommandUnregisterOption) getKarmadaAgentConfig(agent *appsv1.Deployment) (*clientcmdapi.Config, error) {
+	var mountPath, fileName, volumeName, agentConfigSecretName string
+
+	const karmadaConfigPrefix = "--karmada-kubeconfig="
+	for _, cmd := range agent.Spec.Template.Spec.Containers[0].Command {
+		if strings.HasPrefix(cmd, karmadaConfigPrefix) {
+			karmadaConfigPath := cmd[len(karmadaConfigPrefix):]
+			mountPath, fileName = filepath.Dir(karmadaConfigPath), filepath.Base(karmadaConfigPath)
+		}
+	}
+
+	for _, mount := range agent.Spec.Template.Spec.Containers[0].VolumeMounts {
+		if filepath.Clean(mount.MountPath) == mountPath {
+			volumeName = mount.Name
+		}
+	}
+
+	for _, volume := range agent.Spec.Template.Spec.Volumes {
+		if volume.Name == volumeName {
+			agentConfigSecretName = volume.Secret.SecretName
+		}
+	}
+
+	if agentConfigSecretName == "" {
+		return nil, fmt.Errorf("failed to get secret name of karmada agent config")
+	}
+
+	agentConfigSecret, err := j.MemberClusterClient.CoreV1().Secrets(j.Namespace).Get(context.TODO(), agentConfigSecretName, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to get the secret which stores the karmada agent config: %w", err)
+	}
+	if len(agentConfigSecret.Data[fileName]) == 0 {
+		return nil, fmt.Errorf("empty data, secretName: %s, keyName: %s", agentConfigSecretName, fileName)
+	}
+
+	return clientcmd.Load(agentConfigSecret.Data[fileName])
+}
+
+type obj struct{ Kind, Name, Namespace string }
+
+func (o *obj) ToString() string {
+	if o.Namespace == "" {
+		return fmt.Sprintf("%s/%s", o.Kind, o.Name)
+	}
+	return fmt.Sprintf("%s/%s/%s", o.Kind, o.Namespace, o.Name)
+}
+
+// RunUnregisterCluster unregisters the pull mode cluster from karmada.
+func (j *CommandUnregisterOption) RunUnregisterCluster() error {
+	if j.DryRun {
+		return nil
+	}
+
+	// 1. delete the cluster object from the Karmada control plane
+	// TODO: add flag --force to implement force deletion.
+	if err := cmdutil.DeleteClusterObject(j.ControlPlaneKubeClient, j.ControlPlaneClient, j.ClusterName, j.Wait, j.DryRun, false); err != nil {
+		klog.Errorf("Failed to delete cluster object. cluster name: %s, error: %v", j.ClusterName, err)
+		return err
+	}
+	klog.Infof("Successfully deleted cluster object (%s) from control plane.", j.ClusterName)
+
+	// 2. delete resources created by karmada in the member cluster
+	var err error
+	for _, resource := range j.listMemberClusterResources() {
+		switch resource.Kind {
+		case "ClusterRole":
+			err = util.DeleteClusterRole(j.MemberClusterClient, resource.Name)
+		case "ClusterRoleBinding":
+			err = util.DeleteClusterRoleBinding(j.MemberClusterClient, resource.Name)
+		case "ServiceAccount":
+			err = util.DeleteServiceAccount(j.MemberClusterClient, resource.Namespace, resource.Name)
+		case "Secret":
+			err = util.DeleteSecret(j.MemberClusterClient, resource.Namespace, resource.Name)
+		case "Deployment":
+			err = deleteDeployment(j.MemberClusterClient, resource.Namespace, resource.Name)
+		case "Namespace":
+			err = util.DeleteNamespace(j.MemberClusterClient, resource.Name)
+		}
+
+		if err != nil {
+			klog.Errorf("Failed to delete (%v) in unregistering cluster (%s): %+v.", resource, j.ClusterName, err)
+			return err
+		}
+		klog.Infof("Successfully deleted resource (%v) from member cluster (%s).", resource, j.ClusterName)
+	}
+
+	// 3. delete local obsolete files generated by karmadactl
+	localObsoleteFiles := []obj{
+		{Kind: "File", Name: filepath.Join(register.KarmadaDir, register.KarmadaAgentKubeConfigFileName)},
+		{Kind: "File", Name: register.CACertPath},
+	}
+	for _, obj := range localObsoleteFiles {
+		if err = os.Remove(obj.Name); err != nil {
+			if os.IsNotExist(err) {
+				continue
+			}
+			klog.Errorf("Failed to delete local file (%v) on current node: %+v.", obj.Name, err)
+			return err
+		}
+		klog.Infof("Successfully deleted local file (%v) on current node.", obj.Name)
+	}
+
+	return nil
+}
+
+// listMemberClusterResources lists the resources created by karmada in the member cluster that are to be deleted.
+func (j *CommandUnregisterOption) listMemberClusterResources() []obj {
+	return []obj{
+		// the rbac resources prepared for karmada-controller-manager to access the member cluster's kube-apiserver
+		{Kind: "ServiceAccount", Namespace: j.ClusterNamespace, Name: names.GenerateServiceAccountName(j.ClusterName)},
+		{Kind: "ClusterRole", Name: names.GenerateRoleName(names.GenerateServiceAccountName(j.ClusterName))},
+		{Kind: "ClusterRoleBinding", Name: names.GenerateRoleName(names.GenerateServiceAccountName(j.ClusterName))},
+		{Kind: "Secret", Namespace: j.ClusterNamespace, Name: names.GenerateServiceAccountName(j.ClusterName)},
+		// the rbac resources prepared for karmada-aggregated-apiserver to access the member cluster's kube-apiserver
+		{Kind: "ServiceAccount", Namespace: j.ClusterNamespace, Name: names.GenerateServiceAccountName("impersonator")},
+		{Kind: "Secret", Namespace: j.ClusterNamespace, Name: names.GenerateServiceAccountName("impersonator")},
+		// the namespace storing the above rbac resources
+		{Kind: "Namespace", Name: j.ClusterNamespace},
+
+		// the deployment of karmada-agent
+		{Kind: "Deployment", Namespace: j.Namespace, Name: register.KarmadaAgentName},
+		// the rbac resources used by karmada-agent to access the member cluster's kube-apiserver
+		{Kind: "ServiceAccount", Namespace: j.Namespace, Name: register.KarmadaAgentServiceAccountName},
+		{Kind: "ClusterRole", Name: register.KarmadaAgentName},
+		{Kind: "ClusterRoleBinding", Name: register.KarmadaAgentName},
+		// the karmada config used by karmada-agent to access karmada-apiserver
+		{Kind: "Secret", Namespace: j.Namespace, Name: register.KarmadaKubeconfigName},
+	}
+}
+
+// deleteDeployment just tries to delete the Deployment.
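// A NotFound error from the API server is swallowed on purpose: unregister is meant to be
// re-runnable, so a second invocation after a partial cleanup should skip objects that are
// already gone instead of failing, e.g.
//
//	_ = deleteDeployment(client, "karmada-system", register.KarmadaAgentName) // no error if absent
//
// (the call above is illustrative; the namespace actually used at the call site is j.Namespace).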
+func deleteDeployment(client kubeclient.Interface, namespace, name string) error { + err := client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil +} diff --git a/pkg/karmadactl/unregister/unregister_test.go b/pkg/karmadactl/unregister/unregister_test.go new file mode 100644 index 000000000000..ab3c97e9df63 --- /dev/null +++ b/pkg/karmadactl/unregister/unregister_test.go @@ -0,0 +1,234 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unregister + +import ( + "testing" + "time" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/clientcmd" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + fakekarmadaclient "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake" + "github.com/karmada-io/karmada/pkg/karmadactl/register" +) + +const ( + clusterNamespace = "default" + namespace = "default" + clusterName = "member_test" + agentConfigSecretName = "karmada-agent-config" //nolint:gosec + agentConfigKeyName = "karmada.config" + agentSecretVolumeName = "karmada-config" //nolint:gosec +) + +func TestCommandUnregisterOption_Complete_Validate(t *testing.T) { + tests := []struct { + name string + args []string + clusterKubeConfig string + wait time.Duration + wantCompleteErr bool + wantValidateErr bool + }{ + { + name: "args more than one", + args: []string{"member1", "member2"}, + wantCompleteErr: false, + wantValidateErr: true, + }, + { + name: "invalid cluster name", + args: []string{"member.1"}, + wantCompleteErr: false, + wantValidateErr: true, + }, + { + name: "empty clusterKubeConfig", + args: []string{"member1"}, + clusterKubeConfig: "", + wantCompleteErr: false, + wantValidateErr: true, + }, + { + name: "negative wait time", + args: []string{"member1"}, + clusterKubeConfig: "./kube/config", + wait: -1, + wantCompleteErr: false, + wantValidateErr: true, + }, + { + name: "normal case", + args: []string{"member1"}, + clusterKubeConfig: "./kube/config", + wait: 60 * time.Second, + wantCompleteErr: false, + wantValidateErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + j := &CommandUnregisterOption{ + ClusterKubeConfig: tt.clusterKubeConfig, + Wait: tt.wait, + } + + err := j.Complete(tt.args) + if (err == nil && tt.wantCompleteErr) || (err != nil && !tt.wantCompleteErr) { + t.Errorf("Complete() error = %v, wantCompleteErr %v", err, tt.wantCompleteErr) + } + + err = j.Validate(tt.args) + if (err == nil && tt.wantValidateErr) || (err != nil && !tt.wantValidateErr) { + t.Errorf("Validate() error = %v, wantValidateErr %v", err, tt.wantValidateErr) + } + }) + } +} + +func TestCommandUnregisterOption_getKarmadaAgentConfig(t *testing.T) { + agentConfig := register.CreateBasic("http://127.0.0.1:5443", clusterName, "test", 
nil)
+	agentConfigBytes, _ := clientcmd.Write(*agentConfig)
+	agentConfigSecret := createSecret(agentConfigSecretName, namespace, agentConfigKeyName, agentConfigBytes)
+
+	tests := []struct {
+		name              string
+		mountPath         string
+		karmadaConfigPath string
+		clusterResources  []runtime.Object
+		wantErr           bool
+	}{
+		{
+			name:              "common case",
+			mountPath:         "/etc/karmada/config",
+			karmadaConfigPath: "/etc/karmada/config/karmada.config",
+			clusterResources:  []runtime.Object{agentConfigSecret},
+			wantErr:           false,
+		},
+		{
+			name:              "mount path ends with an extra / symbol",
+			mountPath:         "/etc/karmada/config/",
+			karmadaConfigPath: "/etc/karmada/config/karmada.config",
+			clusterResources:  []runtime.Object{agentConfigSecret},
+			wantErr:           false,
+		},
+		{
+			name:              "agent config secret not found",
+			mountPath:         "/etc/karmada/config",
+			karmadaConfigPath: "/etc/karmada/config/karmada.config",
+			wantErr:           true,
+		},
+		{
+			name:              "agent config secret exists but has an invalid key name",
+			mountPath:         "/etc/karmada/config",
+			karmadaConfigPath: "/etc/karmada/config/karmada-config",
+			clusterResources:  []runtime.Object{agentConfigSecret},
+			wantErr:           true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			j := &CommandUnregisterOption{
+				Namespace:           namespace,
+				MemberClusterClient: fake.NewSimpleClientset(tt.clusterResources...),
+			}
+			agent := &appsv1.Deployment{
+				ObjectMeta: metav1.ObjectMeta{Name: register.KarmadaAgentName, Namespace: namespace},
+				Spec: appsv1.DeploymentSpec{
+					Template: corev1.PodTemplateSpec{
+						Spec: corev1.PodSpec{
+							Containers: []corev1.Container{{
+								Command: []string{
+									"/bin/karmada-agent",
+									"--karmada-kubeconfig=" + tt.karmadaConfigPath,
+								},
+								VolumeMounts: []corev1.VolumeMount{{
+									Name:      agentSecretVolumeName,
+									MountPath: tt.mountPath,
+								}},
+							}},
+							Volumes: []corev1.Volume{{
+								Name: agentSecretVolumeName,
+								VolumeSource: corev1.VolumeSource{
+									Secret: &corev1.SecretVolumeSource{SecretName: agentConfigSecretName},
+								},
+							}},
+						},
+					},
+				},
+			}
+			_, err := j.getKarmadaAgentConfig(agent)
+			if (err == nil && tt.wantErr) || (err != nil && !tt.wantErr) {
+				t.Errorf("getKarmadaAgentConfig() error = %v, wantErr %v", err, tt.wantErr)
+			}
+		})
+	}
+}
+
+func TestCommandUnregisterOption_RunUnregisterCluster(t *testing.T) {
+	tests := []struct {
+		name             string
+		clusterObject    []runtime.Object
+		clusterResources []runtime.Object
+		wantErr          bool
+	}{
+		{
+			name:             "cluster object does not exist",
+			clusterObject:    []runtime.Object{},
+			clusterResources: []runtime.Object{},
+			wantErr:          true,
+		},
+		{
+			name:             "cluster exists, but cluster resources not found",
+			clusterObject:    []runtime.Object{&clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: clusterName}}},
+			clusterResources: []runtime.Object{},
+			wantErr:          false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			j := &CommandUnregisterOption{
+				ClusterName:      clusterName,
+				Namespace:        namespace,
+				ClusterNamespace: clusterNamespace,
+				Wait:             60 * time.Second,
+			}
+			j.ControlPlaneClient = fakekarmadaclient.NewSimpleClientset(tt.clusterObject...)
+			j.MemberClusterClient = fake.NewSimpleClientset(tt.clusterResources...)
+ err := j.RunUnregisterCluster() + if (err == nil && tt.wantErr) || (err != nil && !tt.wantErr) { + t.Errorf("RunUnregisterCluster() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func createSecret(secretName, secretNamespace, keyName string, value []byte) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Name: secretName, Namespace: secretNamespace}, + Data: map[string][]byte{ + keyName: value, + }, + } +} diff --git a/pkg/karmadactl/util/apiclient/apiclient.go b/pkg/karmadactl/util/apiclient/apiclient.go index d5194970f412..c29dfed8d7ca 100644 --- a/pkg/karmadactl/util/apiclient/apiclient.go +++ b/pkg/karmadactl/util/apiclient/apiclient.go @@ -98,12 +98,12 @@ func NewClientSet(c *rest.Config) (*kubernetes.Clientset, error) { } // NewCRDsClient is to create a clientset ClientSet -func NewCRDsClient(c *rest.Config) (*clientset.Clientset, error) { +func NewCRDsClient(c *rest.Config) (clientset.Interface, error) { return clientset.NewForConfig(c) } // NewAPIRegistrationClient is to create an apiregistration ClientSet -func NewAPIRegistrationClient(c *rest.Config) (*aggregator.Clientset, error) { +func NewAPIRegistrationClient(c *rest.Config) (aggregator.Interface, error) { return aggregator.NewForConfig(c) } diff --git a/pkg/karmadactl/util/cluster.go b/pkg/karmadactl/util/cluster.go new file mode 100644 index 000000000000..f5da3f4fab35 --- /dev/null +++ b/pkg/karmadactl/util/cluster.go @@ -0,0 +1,151 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + kubeclient "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + karmadaclientset "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/names" +) + +// DeleteClusterObject deletes the cluster object from the Karmada control plane. +func DeleteClusterObject(controlPlaneKubeClient kubeclient.Interface, controlPlaneKarmadaClient karmadaclientset.Interface, clusterName string, + timeout time.Duration, dryRun bool, forceDeletion bool) error { + if dryRun { + return nil + } + + err := controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return fmt.Errorf("no cluster object %s found in karmada control Plane", clusterName) + } + if err != nil { + klog.Errorf("Failed to delete cluster object. cluster name: %s, error: %v", clusterName, err) + return err + } + + // make sure the given cluster object has been deleted. + // If the operation times out and `forceDeletion` is true, then force deletion begins, which involves sequentially deleting the `work`, `executionSpace`, and `cluster` finalizers. 
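// In other words, the polling below is an ordinary "wait for the delete to complete" loop;
// only when it times out AND forceDeletion is set does control fall through to the finalizer
// removal helpers defined later in this file, in this order:
//
//	removeWorkFinalizer(...)           // Works in the execution space
//	removeExecutionSpaceFinalizer(...) // the execution-space namespace itself
//	removeClusterFinalizer(...)        // finally, the Cluster object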
+	err = wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, timeout, false, func(context.Context) (done bool, err error) {
+		_, err = controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
+		if apierrors.IsNotFound(err) {
+			return true, nil
+		}
+		if err != nil {
+			klog.Errorf("Failed to get cluster %s. err: %v", clusterName, err)
+			return false, err
+		}
+		klog.Infof("Waiting for the cluster object %s to be deleted", clusterName)
+		return false, nil
+	})
+
+	// If the Cluster object cannot be deleted within the timeout period, it is most likely because
+	// resources in the member cluster could not be cleaned up. With force deletion enabled, we try
+	// to clean up the Cluster object by removing the finalizers from related resources. This
+	// behavior may leave some resources behind in the member clusters.
+	if err != nil && forceDeletion {
+		klog.Warningf("Deleting the cluster object timed out. cluster name: %s, error: %v", clusterName, err)
+		klog.Infof("Start forced deletion. cluster name: %s", clusterName)
+		executionSpaceName := names.GenerateExecutionSpaceName(clusterName)
+		err = removeWorkFinalizer(executionSpaceName, controlPlaneKarmadaClient)
+		if err != nil {
+			klog.Errorf("Force deletion. Failed to remove the finalizer of Work, error: %v", err)
+		}
+
+		err = removeExecutionSpaceFinalizer(executionSpaceName, controlPlaneKubeClient)
+		if err != nil {
+			klog.Errorf("Force deletion. Failed to remove the finalizer of Namespace(%s), error: %v", executionSpaceName, err)
+		}
+
+		err = removeClusterFinalizer(clusterName, controlPlaneKarmadaClient)
+		if err != nil {
+			klog.Errorf("Force deletion. Failed to remove the finalizer of Cluster(%s), error: %v", clusterName, err)
+		}
+
+		klog.Infof("Forced deletion is complete.")
+		return nil
+	}
+
+	return err
+}
+
+// removeWorkFinalizer removes the finalizer of works in the executionSpace.
+func removeWorkFinalizer(executionSpaceName string, controlPlaneKarmadaClient karmadaclientset.Interface) error {
+	list, err := controlPlaneKarmadaClient.WorkV1alpha1().Works(executionSpaceName).List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return fmt.Errorf("failed to list works in executionSpace %s: %w", executionSpaceName, err)
+	}
+
+	for i := range list.Items {
+		work := &list.Items[i]
+		if !controllerutil.ContainsFinalizer(work, util.ExecutionControllerFinalizer) {
+			continue
+		}
+		controllerutil.RemoveFinalizer(work, util.ExecutionControllerFinalizer)
+		_, err = controlPlaneKarmadaClient.WorkV1alpha1().Works(executionSpaceName).Update(context.TODO(), work, metav1.UpdateOptions{})
+		if err != nil {
+			return fmt.Errorf("failed to remove the finalizer of work(%s/%s): %w", executionSpaceName, work.GetName(), err)
+		}
+	}
+	return nil
+}
+
+// removeExecutionSpaceFinalizer removes the finalizer of executionSpace.
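// The execution space is the per-cluster namespace on the control plane (generated by
// names.GenerateExecutionSpaceName from the cluster name) that holds the Work objects;
// clearing its "kubernetes" finalizer below lets the namespace terminate even though its
// contents could not be cleaned up the normal way.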
+func removeExecutionSpaceFinalizer(executionSpaceName string, controlPlaneKubeClient kubeclient.Interface) error { + executionSpace, err := controlPlaneKubeClient.CoreV1().Namespaces().Get(context.TODO(), executionSpaceName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get Namespace(%s)", executionSpaceName) + } + + if !controllerutil.ContainsFinalizer(executionSpace, string(corev1.FinalizerKubernetes)) { + return nil + } + + controllerutil.RemoveFinalizer(executionSpace, "kubernetes") + _, err = controlPlaneKubeClient.CoreV1().Namespaces().Update(context.TODO(), executionSpace, metav1.UpdateOptions{}) + + return err +} + +// removeClusterFinalizer removes the finalizer of cluster object. +func removeClusterFinalizer(clusterName string, controlPlaneKarmadaClient karmadaclientset.Interface) error { + cluster, err := controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get Cluster(%s)", clusterName) + } + + if !controllerutil.ContainsFinalizer(cluster, util.ClusterControllerFinalizer) { + return nil + } + + controllerutil.RemoveFinalizer(cluster, util.ClusterControllerFinalizer) + _, err = controlPlaneKarmadaClient.ClusterV1alpha1().Clusters().Update(context.TODO(), cluster, metav1.UpdateOptions{}) + + return err +} diff --git a/pkg/karmadactl/util/command_group.go b/pkg/karmadactl/util/command_group.go index 4899d4f4f32d..c9eb67ed26fa 100644 --- a/pkg/karmadactl/util/command_group.go +++ b/pkg/karmadactl/util/command_group.go @@ -36,4 +36,10 @@ const ( // GroupAdvancedCommands means the command belongs to Group "Advanced Commands" GroupAdvancedCommands = "Advanced Commands" + + // GroupSettingsCommands means the command belongs to Group "Settings Commands" + GroupSettingsCommands = "Settings Commands" + + // GroupOtherCommands means the command belongs to Group "Other Commands" + GroupOtherCommands = "Other Commands" ) diff --git a/pkg/karmadactl/util/completion/completion.go b/pkg/karmadactl/util/completion/completion.go new file mode 100644 index 000000000000..374fa591288c --- /dev/null +++ b/pkg/karmadactl/util/completion/completion.go @@ -0,0 +1,505 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package completion
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/spf13/cobra"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	"k8s.io/cli-runtime/pkg/printers"
+	"k8s.io/kubectl/pkg/cmd/apiresources"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
+	"k8s.io/kubectl/pkg/polymorphichelpers"
+	"k8s.io/kubectl/pkg/scheme"
+
+	"github.com/karmada-io/karmada/pkg/karmadactl/get"
+	"github.com/karmada-io/karmada/pkg/karmadactl/options"
+	"github.com/karmada-io/karmada/pkg/karmadactl/util"
+)
+
+var factory util.Factory
+
+// SetFactoryForCompletion Store the factory which is needed by the completion functions.
+// Not all commands have access to the factory, so cannot pass it to the completion functions.
+func SetFactoryForCompletion(f util.Factory) {
+	factory = f
+}
+
+// ResourceTypeAndNameCompletionFunc Returns a completion function that completes resource types
+// and resource names that match the toComplete prefix. It supports the <type>/<name> form.
+func ResourceTypeAndNameCompletionFunc(f util.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return resourceTypeAndNameCompletionFunc(f, nil, true)
+}
+
+// SpecifiedResourceTypeAndNameCompletionFunc Returns a completion function that completes resource
+// types limited to the specified allowedTypes, and resource names that match the toComplete prefix.
+// It allows for multiple resources. It supports the <type>/<name> form.
+func SpecifiedResourceTypeAndNameCompletionFunc(f util.Factory, allowedTypes []string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return resourceTypeAndNameCompletionFunc(f, allowedTypes, true)
+}
+
+// ResourceNameCompletionFunc Returns a completion function that completes as a first argument
+// the resource names specified by the resourceType parameter, and which match the toComplete prefix.
+// This function does NOT support the <type>/<name> form: it is meant to be used by commands
+// that don't support that form. For commands that apply to pods and that support the <type>/<name>
+// form, please use PodResourceNameCompletionFunc()
+func ResourceNameCompletionFunc(f util.Factory, resourceType string) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		if len(args) == 0 {
+			comps = CompGetResource(f, cmd, resourceType, toComplete)
+		}
+		return comps, cobra.ShellCompDirectiveNoFileComp
+	}
+}
+
+// PodResourceNameCompletionFunc Returns a completion function that completes:
+// 1- pod names that match the toComplete prefix
+// 2- resource types containing pods which match the toComplete prefix
+func PodResourceNameCompletionFunc(f util.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		directive := cobra.ShellCompDirectiveNoFileComp
+		if len(args) == 0 {
+			comps, directive = doPodResourceCompletion(f, cmd, toComplete)
+		}
+		return comps, directive
+	}
+}
+
+// PodResourceNameAndContainerCompletionFunc Returns a completion function that completes, as a first argument:
+// 1- pod names that match the toComplete prefix
+// 2- resource types containing pods which match the toComplete prefix
+// and as a second argument the containers within the specified pod.
+func PodResourceNameAndContainerCompletionFunc(f util.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		directive := cobra.ShellCompDirectiveNoFileComp
+		if len(args) == 0 {
+			comps, directive = doPodResourceCompletion(f, cmd, toComplete)
+		} else if len(args) == 1 {
+			podName := convertResourceNameToPodName(f, args[0])
+			comps = CompGetContainers(f, cmd, podName, toComplete)
+		}
+		return comps, directive
+	}
+}
+
+// ContainerCompletionFunc Returns a completion function that completes the containers within the
+// pod specified by the first argument. The resource containing the pod can be specified in
+// the <type>/<name> form.
+func ContainerCompletionFunc(f util.Factory) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		// We need the pod name to be able to complete the container names, it must be in args[0].
+		// That first argument can also be of the form <type>/<name> so we need to convert it.
+		if len(args) > 0 {
+			podName := convertResourceNameToPodName(f, args[0])
+			comps = CompGetContainers(f, cmd, podName, toComplete)
+		}
+		return comps, cobra.ShellCompDirectiveNoFileComp
+	}
+}
+
+// CompGetResource gets the list of the resource specified which begin with `toComplete`.
+func CompGetResource(f util.Factory, cmd *cobra.Command, resourceName string, toComplete string) []string {
+	template := "{{ range .items }}{{ .metadata.name }} {{ end }}"
+	return CompGetFromTemplate(&template, f, cmd, "", []string{resourceName}, toComplete)
+}
+
+// CompGetContainers gets the list of containers of the specified pod which begin with `toComplete`.
+func CompGetContainers(f util.Factory, cmd *cobra.Command, podName string, toComplete string) []string { + template := "{{ range .spec.initContainers }}{{ .name }} {{end}}{{ range .spec.containers }}{{ .name }} {{ end }}" + return CompGetFromTemplate(&template, f, cmd, "", []string{"pod", podName}, toComplete) +} + +// CompGetFromTemplate executes a Get operation using the specified template and args and returns the results +// which begin with `toComplete`. +func CompGetFromTemplate(template *string, f util.Factory, cmd *cobra.Command, namespace string, args []string, toComplete string) []string { + buf := new(bytes.Buffer) + streams := genericiooptions.IOStreams{In: os.Stdin, Out: buf, ErrOut: io.Discard} + o := get.NewCommandGetOptions(streams) + + // Get the list of names of the specified resource + o.PrintFlags.TemplateFlags.GoTemplatePrintFlags.TemplateArgument = template + format := "go-template" + o.PrintFlags.OutputFormat = &format + + // Do the steps Complete() would have done. + // We cannot actually call Complete() or Validate() as these function check for + // the presence of flags, which, in our case won't be there + if namespace != "" { + o.Namespace = namespace + o.ExplicitNamespace = true + } else { + var err error + o.Namespace, o.ExplicitNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return nil + } + } + + o.ToPrinter = func(_ *meta.RESTMapping, _ *bool, _ bool, _ bool) (printers.ResourcePrinterFunc, error) { + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + return printer.PrintObj, nil + } + + o.OperationScope = options.KarmadaControlPlane + // currently, the operation-scope of command `top`, `logs` and `promote` is members. + if cmd.Annotations["parent"] == "top" || cmd.Name() == "logs" || cmd.Name() == "promote" { + o.OperationScope = options.Members + } + operationScopeFlag := cmd.Flag("operation-scope") + if operationScopeFlag != nil { + o.OperationScope = options.OperationScope(operationScopeFlag.Value.String()) + } + o.Clusters, _ = cmd.Flags().GetStringSlice("clusters") + clusterFlag := cmd.Flag("cluster") + if clusterFlag != nil { + cluster := clusterFlag.Value.String() + if len(cluster) != 0 { + o.Clusters = []string{cluster} + } + } + + o.KarmadaClient, _ = f.KarmadaClientSet() + if err := o.HandleClusterScopeFlags(); err != nil { + return nil + } + + if err := o.Run(f, args); err != nil { + return nil + } + var comps []string + resources := strings.Split(buf.String(), " ") + for _, res := range resources { + if res != "" && strings.HasPrefix(res, toComplete) { + comps = append(comps, res) + } + } + return comps +} + +// ListContextsInConfig returns a list of context names which begin with `toComplete` +func ListContextsInConfig(toComplete string) []string { + config, err := factory.ToRawKubeConfigLoader().RawConfig() + if err != nil { + return nil + } + var ret []string + for name := range config.Contexts { + if strings.HasPrefix(name, toComplete) { + ret = append(ret, name) + } + } + return ret +} + +// ListClustersInConfig returns a list of cluster names which begin with `toComplete` +func ListClustersInConfig(toComplete string) []string { + set, err := factory.KarmadaClientSet() + if err != nil { + return nil + } + + list, err := set.ClusterV1alpha1().Clusters().List(context.Background(), metav1.ListOptions{}) + if err != nil { + return nil + } + + var ret []string + for _, cluster := range list.Items { + if strings.HasPrefix(cluster.Name, toComplete) { + ret = append(ret, cluster.Name) + } + 
}
+	return ret
+}
+
+// compGetResourceList returns the list of api resources which begin with `toComplete`.
+func compGetResourceList(restClientGetter genericclioptions.RESTClientGetter, cmd *cobra.Command, toComplete string) []string {
+	buf := new(bytes.Buffer)
+	streams := genericiooptions.IOStreams{In: os.Stdin, Out: buf, ErrOut: io.Discard}
+
+	// TODO: Using karmadactlapiresources.CommandAPIResourcesOptions to adapt to the operation scope.
+	o := apiresources.NewAPIResourceOptions(streams)
+
+	if err := o.Complete(restClientGetter, cmd, nil); err != nil {
+		return nil
+	}
+
+	// Get the list of resources
+	o.Output = "name"
+	o.Cached = true
+	o.Verbs = []string{"get"}
+	// TODO: Should set --request-timeout=5s
+
+	// Ignore errors as the output may still be valid
+	if err := o.RunAPIResources(); err != nil {
+		return nil
+	}
+
+	// Resources can be a comma-separated list. The last element is then
+	// the one we should complete. For example if toComplete=="pods,secre"
+	// we should return "pods,secrets"
+	prefix := ""
+	suffix := toComplete
+	lastIdx := strings.LastIndex(toComplete, ",")
+	if lastIdx != -1 {
+		prefix = toComplete[0 : lastIdx+1]
+		suffix = toComplete[lastIdx+1:]
+	}
+	var comps []string
+	resources := strings.Split(buf.String(), "\n")
+	for _, res := range resources {
+		if res != "" && strings.HasPrefix(res, suffix) {
+			comps = append(comps, fmt.Sprintf("%s%s", prefix, res))
+		}
+	}
+	return comps
+}
+
+// resourceTypeAndNameCompletionFunc Returns a completion function that completes resource types
+// and resource names that match the toComplete prefix. It supports the <type>/<name> form.
+//
+//nolint:gocyclo
+func resourceTypeAndNameCompletionFunc(f util.Factory, allowedTypes []string, allowRepeat bool) func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+	return func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
+		var comps []string
+		directive := cobra.ShellCompDirectiveNoFileComp
+
+		if len(args) > 0 && !strings.Contains(args[0], "/") {
+			// The first argument is of the form <type> (e.g., pods)
+			// All following arguments should be a resource name.
+			if allowRepeat || len(args) == 1 {
+				comps = CompGetResource(f, cmd, args[0], toComplete)
+
+				// Remove choices already on the command-line
+				if len(args) > 1 {
+					comps = cmdutil.Difference(comps, args[1:])
+				}
+			}
+		} else {
+			slashIdx := strings.Index(toComplete, "/")
+			if slashIdx == -1 {
+				if len(args) == 0 {
+					// We are completing the first argument. We default to the normal
+					// form (not the form <type>/<name>).
+					// So we suggest resource types and let the shell add a space after
+					// the completion.
+					if len(allowedTypes) == 0 {
+						comps = compGetResourceList(f, cmd, toComplete)
+					} else {
+						for _, c := range allowedTypes {
+							if strings.HasPrefix(c, toComplete) {
+								comps = append(comps, c)
+							}
+						}
+					}
+				} else {
+					// Here we know the first argument contains a / (<type>/<name>).
+					// All other arguments must also use that form.
+					if allowRepeat {
+						// Since toComplete does not already contain a / we know we are completing a
+						// resource type. Disable adding a space after the completion, and add the /
+						directive |= cobra.ShellCompDirectiveNoSpace
+
+						if len(allowedTypes) == 0 {
+							typeComps := compGetResourceList(f, cmd, toComplete)
+							for _, c := range typeComps {
+								comps = append(comps, fmt.Sprintf("%s/", c))
+							}
+						} else {
+							for _, c := range allowedTypes {
+								if strings.HasPrefix(c, toComplete) {
+									comps = append(comps, fmt.Sprintf("%s/", c))
+								}
+							}
+						}
+					}
+				}
+			} else {
+				// We are completing an argument of the form <type>/<name>
+				// and since the / is already present, we are completing the resource name.
+				if allowRepeat || len(args) == 0 {
+					resourceType := toComplete[:slashIdx]
+					toComplete = toComplete[slashIdx+1:]
+					nameComps := CompGetResource(f, cmd, resourceType, toComplete)
+					for _, c := range nameComps {
+						comps = append(comps, fmt.Sprintf("%s/%s", resourceType, c))
+					}
+
+					// Remove choices already on the command-line.
+					if len(args) > 0 {
+						comps = cmdutil.Difference(comps, args[0:])
+					}
+				}
+			}
+		}
+		return comps, directive
+	}
+}
+
+// doPodResourceCompletion Returns completions of:
+// 1- pod names that match the toComplete prefix
+// 2- resource types containing pods which match the toComplete prefix
+func doPodResourceCompletion(f util.Factory, cmd *cobra.Command, toComplete string) ([]string, cobra.ShellCompDirective) {
+	var comps []string
+	directive := cobra.ShellCompDirectiveNoFileComp
+	slashIdx := strings.Index(toComplete, "/")
+	if slashIdx == -1 {
+		// Standard case, complete pod names
+		comps = CompGetResource(f, cmd, "pod", toComplete)
+
+		// Also include resource choices for the <type>/<name> form,
+		// but only for resources that contain pods
+		resourcesWithPods := []string{
+			"daemonsets",
+			"deployments",
+			"pods",
+			"jobs",
+			"replicasets",
+			"replicationcontrollers",
+			"services",
+			"statefulsets"}
+
+		if len(comps) == 0 {
+			// If there are no pods to complete, we will only be completing
+			// <type>/. We should disable adding a space after the /.
+			directive |= cobra.ShellCompDirectiveNoSpace
+		}
+
+		for _, resource := range resourcesWithPods {
+			if strings.HasPrefix(resource, toComplete) {
+				comps = append(comps, fmt.Sprintf("%s/", resource))
+			}
+		}
+	} else {
+		// Dealing with the <type>/<name> form, use the specified resource type
+		resourceType := toComplete[:slashIdx]
+		toComplete = toComplete[slashIdx+1:]
+		nameComps := CompGetResource(f, cmd, resourceType, toComplete)
+		for _, c := range nameComps {
+			comps = append(comps, fmt.Sprintf("%s/%s", resourceType, c))
+		}
+	}
+	return comps, directive
+}
+
+// convertResourceNameToPodName Converts a resource name to a pod name.
+// If the resource name is of the form <type>/<name>, we use
+// polymorphichelpers.AttachablePodForObjectFn(), if not, the resource name
+// is already a pod name.
+func convertResourceNameToPodName(f cmdutil.Factory, resourceName string) string {
+	var podName string
+	if !strings.Contains(resourceName, "/") {
+		// When we don't have the <type>/<name> form, the resource name is the pod name
+		podName = resourceName
+	} else {
+		// if the resource name is of the form <type>/<name>, we need to convert it to a pod name
+		ns, _, err := f.ToRawKubeConfigLoader().Namespace()
+		if err != nil {
+			return ""
+		}
+
+		resourceWithPod, err := f.NewBuilder().
+			WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
+			ContinueOnError().
+			NamespaceParam(ns).DefaultNamespace().
+			ResourceNames("pods", resourceName).
+ Do().Object() + if err != nil { + return "" + } + + // For shell completion, use a short timeout + forwardablePod, err := polymorphichelpers.AttachablePodForObjectFn(f, resourceWithPod, 100*time.Millisecond) + if err != nil { + return "" + } + podName = forwardablePod.Name + } + return podName +} + +// RegisterCompletionFuncForNamespaceFlag registers CompletionFunc for flag namespace. +func RegisterCompletionFuncForNamespaceFlag(cmd *cobra.Command, f util.Factory) { + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "namespace", + func(cmd *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return CompGetResource(f, cmd, "namespace", toComplete), cobra.ShellCompDirectiveNoFileComp + })) +} + +// RegisterCompletionFuncForClusterFlag registers CompletionFunc for flag cluster. +func RegisterCompletionFuncForClusterFlag(cmd *cobra.Command) { + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "cluster", + func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return ListClustersInConfig(toComplete), cobra.ShellCompDirectiveNoFileComp + })) +} + +// RegisterCompletionFuncForClustersFlag registers CompletionFunc for flag clusters. +func RegisterCompletionFuncForClustersFlag(cmd *cobra.Command) { + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "clusters", + func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return ListClustersInConfig(toComplete), cobra.ShellCompDirectiveNoFileComp + })) +} + +// RegisterCompletionFuncForKarmadaContextFlag registers CompletionFunc for flag karmada-context. +func RegisterCompletionFuncForKarmadaContextFlag(cmd *cobra.Command) { + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "karmada-context", + func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return ListContextsInConfig(toComplete), cobra.ShellCompDirectiveNoFileComp + })) +} + +// RegisterCompletionFuncForOperationScopeFlag registers CompletionFunc for flag operation-scope. +func RegisterCompletionFuncForOperationScopeFlag(cmd *cobra.Command, supportScope ...options.OperationScope) { + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "operation-scope", + func(_ *cobra.Command, _ []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var ret []string + + if len(supportScope) == 0 { + supportScope = []options.OperationScope{options.KarmadaControlPlane, options.Members, options.All} + } + for _, scope := range supportScope { + if strings.HasPrefix(scope.String(), toComplete) { + ret = append(ret, scope.String()) + } + } + return ret, cobra.ShellCompDirectiveNoFileComp + })) +} diff --git a/pkg/karmadactl/util/idempotency.go b/pkg/karmadactl/util/idempotency.go index 5a0337b4086d..3762c55ac93e 100644 --- a/pkg/karmadactl/util/idempotency.go +++ b/pkg/karmadactl/util/idempotency.go @@ -96,7 +96,7 @@ func CreateOrUpdateDeployment(client kubernetes.Interface, deploy *appsv1.Deploy // CreateOrUpdateAPIService creates a ApiService if the target resource doesn't exist. // If the resource exists already, this function will update the resource instead. 
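// (Widening the parameter from the concrete *aggregator.Clientset to the aggregator.Interface
// it already satisfies is what allows fake clientsets to be injected in tests; callers are
// otherwise unaffected.)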
-func CreateOrUpdateAPIService(apiRegistrationClient *aggregator.Clientset, apiservice *apiregistrationv1.APIService) error { +func CreateOrUpdateAPIService(apiRegistrationClient aggregator.Interface, apiservice *apiregistrationv1.APIService) error { if _, err := apiRegistrationClient.ApiregistrationV1().APIServices().Create(context.TODO(), apiservice, metav1.CreateOptions{}); err != nil { if !apierrors.IsAlreadyExists(err) { return fmt.Errorf("unable to create APIService: %v", err) diff --git a/pkg/karmadactl/util/validate.go b/pkg/karmadactl/util/validate.go index e387eebba503..10e026046b33 100644 --- a/pkg/karmadactl/util/validate.go +++ b/pkg/karmadactl/util/validate.go @@ -39,7 +39,7 @@ func VerifyClustersExist(input []string, clusters *clusterv1alpha1.ClusterList) } } if len(nonExistClusters) != 0 { - return fmt.Errorf("clusters don't exist: " + strings.Join(nonExistClusters, ",")) + return fmt.Errorf("clusters don't exist: %s", strings.Join(nonExistClusters, ",")) } return nil diff --git a/pkg/modeling/modeling.go b/pkg/modeling/modeling.go index 5564a1bebcb4..9e4842cae0a2 100644 --- a/pkg/modeling/modeling.go +++ b/pkg/modeling/modeling.go @@ -163,7 +163,7 @@ func (rs *ResourceSummary) clusterResourceNodeComparator(a, b interface{}) int { func (rs *ResourceSummary) AddToResourceSummary(crn ClusterResourceNode) { index := rs.getIndex(crn) if index == -1 { - klog.Error("ClusterResource can not add to resource summary: index is invalid.") + klog.Errorf("Failed to add node to resource summary due to no appropriate grade. ClusterResourceNode:%v", crn) return } modeling := &(*rs).RMs[index] diff --git a/pkg/registry/cluster/storage/aggregate.go b/pkg/registry/cluster/storage/aggregate.go index 8ab773ceb5a5..1af8466d5c59 100644 --- a/pkg/registry/cluster/storage/aggregate.go +++ b/pkg/registry/cluster/storage/aggregate.go @@ -149,7 +149,8 @@ func requestWithResourceNameHandlerFunc( klog.Errorf("failed to get impersonateToken for cluster %s: %v", cluster.Name, err) return } - statusCode, err := doClusterRequest(req.Method, requestURLStr(location.String(), proxyRequestInfo), transport, requester, impersonateToken) + statusCode, err := doClusterRequest(http.MethodGet, requestURLStr(location, proxyRequestInfo), transport, + requester, impersonateToken) if err != nil { klog.Errorf("failed to do request for cluster %s: %v", cluster.Name, err) return @@ -358,7 +359,7 @@ func doClusterRequest( } // requestURLStr returns the request resource url string. -func requestURLStr(urlStr string, requestInfo *request.RequestInfo) string { +func requestURLStr(location *url.URL, requestInfo *request.RequestInfo) string { parts := []string{requestInfo.APIPrefix} if requestInfo.APIGroup != "" { parts = append(parts, requestInfo.APIGroup) @@ -377,7 +378,7 @@ func requestURLStr(urlStr string, requestInfo *request.RequestInfo) string { requestInfo.Subresource != "exec" && requestInfo.Subresource != "log" { parts = append(parts, requestInfo.Subresource) } - return fmt.Sprintf("%s/%s", urlStr, strings.Join(parts, "/")) + return location.ResolveReference(&url.URL{Path: strings.Join(parts, "/")}).String() } func setRequestHeader(req *http.Request, userInfo user.Info, impersonateToken string) { diff --git a/pkg/registry/cluster/storage/aggregate_test.go b/pkg/registry/cluster/storage/aggregate_test.go new file mode 100644 index 000000000000..378df7b0ee46 --- /dev/null +++ b/pkg/registry/cluster/storage/aggregate_test.go @@ -0,0 +1,67 @@ +/* +Copyright 2022 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "net/http" + "net/url" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/karmada-io/karmada/pkg/util/lifted" +) + +func TestRequestURL(t *testing.T) { + tests := []struct { + name string + urlString string + request http.Request + want string + }{ + { + name: "without slash in the end", + urlString: "https://0.0.0.0:6443", + request: http.Request{ + Method: "GET", + URL: &url.URL{ + Path: "/api/v1/namespaces/test/pods/", + }, + }, + want: "https://0.0.0.0:6443/api/v1/namespaces/test/pods", + }, + { + name: "with slash in the end", + urlString: "https://0.0.0.0:6443/", + request: http.Request{ + Method: "GET", + URL: &url.URL{ + Path: "/api/v1/namespaces/test/pods/", + }, + }, + want: "https://0.0.0.0:6443/api/v1/namespaces/test/pods", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + proxyRequestInfo := lifted.NewRequestInfo(&tt.request) + location, _ := url.Parse(tt.urlString) + requestURL := requestURLStr(location, proxyRequestInfo) + require.Equal(t, tt.want, requestURL) + }) + } +} diff --git a/pkg/registry/search/storage/proxy_test.go b/pkg/registry/search/storage/proxy_test.go new file mode 100644 index 000000000000..ec710063f710 --- /dev/null +++ b/pkg/registry/search/storage/proxy_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package storage
+
+import (
+	"context"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	genericrequest "k8s.io/apiserver/pkg/endpoints/request"
+
+	searchapis "github.com/karmada-io/karmada/pkg/apis/search"
+	"github.com/karmada-io/karmada/pkg/search/proxy"
+)
+
+func TestNewProxyingREST(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	assert.NotNil(t, r)
+	assert.Equal(t, ctl, r.ctl)
+}
+
+func TestProxyingREST_New(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	obj := r.New()
+	assert.NotNil(t, obj)
+	_, ok := obj.(*searchapis.Proxying)
+	assert.True(t, ok)
+}
+
+func TestProxyingREST_NamespaceScoped(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	assert.False(t, r.NamespaceScoped())
+}
+
+func TestProxyingREST_ConnectMethods(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	methods := r.ConnectMethods()
+	assert.Equal(t, proxyMethods, methods)
+}
+
+func TestProxyingREST_NewConnectOptions(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	obj, ok, s := r.NewConnectOptions()
+	assert.Nil(t, obj)
+	assert.True(t, ok)
+	assert.Equal(t, "", s)
+}
+
+func TestProxyingREST_Connect(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+
+	t.Run("Test missing RequestInfo in context", func(t *testing.T) {
+		ctx := context.Background()
+		_, err := r.Connect(ctx, "", nil, nil)
+		assert.NotNil(t, err)
+		assert.EqualError(t, err, "no RequestInfo found in the context")
+	})
+
+	t.Run("Test invalid RequestInfo parts", func(t *testing.T) {
+		ctx := genericrequest.WithRequestInfo(context.Background(), &genericrequest.RequestInfo{
+			Parts: []string{"proxying"},
+		})
+		_, err := r.Connect(ctx, "", nil, nil)
+		assert.NotNil(t, err)
+		assert.EqualError(t, err, "invalid requestInfo parts: [proxying]")
+	})
+}
+
+func TestProxyingREST_Destroy(_ *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	r.Destroy()
+}
+
+func TestProxyingREST_GetSingularName(t *testing.T) {
+	ctl := &proxy.Controller{}
+	r := NewProxyingREST(ctl)
+	name := r.GetSingularName()
+	assert.Equal(t, "proxying", name)
+}
diff --git a/pkg/resourceinterpreter/OWNERS b/pkg/resourceinterpreter/OWNERS
index 95f66b2b3224..2907c80f8de4 100644
--- a/pkg/resourceinterpreter/OWNERS
+++ b/pkg/resourceinterpreter/OWNERS
@@ -1,6 +1,5 @@
 reviewers:
 - chaunceyjiang
-- iawia002
 - ikaven1024
 - XiShanYongYe-Chang
 approvers:
diff --git a/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go b/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
index a24023e60fc8..36b085dca1a1 100644
--- a/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
+++ b/pkg/resourceinterpreter/customized/declarative/luavm/lua_convert.go
@@ -52,7 +52,7 @@ func ConvertLuaResultInto(luaResult *lua.LTable, obj interface{}, references ...
 	// but if a field is empty struct, it will be encoded into empty slice format as '[]' (root cause is empty lua.LTable
 	// can not be distinguished from empty slice or empty struct).
 	//
-	// Supposing an object contains empty fileds, like following one has an empty slice field and an empty struct field.
+	// Supposing an object contains empty fields, like the following one, which has an empty slice field and an empty struct field.
 	// e.g: struct{one-filed: {}, another-field: []}
 	//
 	// When it is converted to lua.LTable, empty slice and empty struct are all converted to lua.LTable{}, which can't be distinguished.
@@ -195,7 +195,7 @@ func traverseToFindEmptyField(root gjson.Result, fieldPath []string) (sets.Set[s
 // 3. when traverse to the field `spec.dd.ee`, we got an empty slice, but it also exists in `fieldOfEmptyStruct`,
 // so, it originally is struct too, we add it into `fieldOfEmptySliceToStruct` variable.
 // 4. when traverse to the field `spec.dd.ff`, we got an empty slice, but it not exists in either map variable,
-// so, it orinally not exist, we can't judge whether it is struct, so we add it into `fieldOfEmptySliceToDelete` variable to remove it.
+// so, it did not originally exist and we can't judge whether it is a struct, so we add it into `fieldOfEmptySliceToDelete` variable to remove it.
 //
 // So, finally, fieldOfEmptySliceToStruct={"spec.aa", "spec.dd.ee"}, fieldOfEmptySliceToDelete={"spec.dd.ff"}
 func traverseToFindEmptyFieldNeededModify(root gjson.Result, fieldPath, fieldPathWithArrayIndex []string, fieldOfEmptySlice, fieldOfEmptyStruct sets.Set[string]) (sets.Set[string], sets.Set[string]) {
diff --git a/pkg/resourceinterpreter/default/native/dependencies.go b/pkg/resourceinterpreter/default/native/dependencies.go
index 94aaf2ba22fb..0e6095059672 100644
--- a/pkg/resourceinterpreter/default/native/dependencies.go
+++ b/pkg/resourceinterpreter/default/native/dependencies.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
 	mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
 
 	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
@@ -132,7 +133,36 @@ func getStatefulSetDependencies(object *unstructured.Unstructured) ([]configv1al
 		return nil, err
 	}
 
-	return helper.GetDependenciesFromPodTemplate(podObj)
+	deps, err := helper.GetDependenciesFromPodTemplate(podObj)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(statefulSetObj.Spec.VolumeClaimTemplates) == 0 {
+		return deps, nil
+	}
+
+	// Ignore a PersistentVolumeClaim dependency if it is covered by the StatefulSet's
+	// volumeClaimTemplates: the StatefulSet manages those PVCs in the member cluster
+	// itself, so a PVC with a matching name here is just a placeholder, not a real PVC.
+	var validDeps []configv1alpha1.DependentObjectReference
+	volumeClaimTemplateNames := sets.Set[string]{}
+	for i := range statefulSetObj.Spec.VolumeClaimTemplates {
+		volumeClaimTemplateNames.Insert(statefulSetObj.Spec.VolumeClaimTemplates[i].Name)
+	}
+
+	for i := range deps {
+		if deps[i].Kind != util.PersistentVolumeClaimKind {
+			validDeps = append(validDeps, deps[i])
+			continue
+		}
+		if volumeClaimTemplateNames.Has(deps[i].Name) {
+			continue
+		}
+		validDeps = append(validDeps, deps[i])
+	}
+
+	return validDeps, nil
 }
 
 func getIngressDependencies(object *unstructured.Unstructured) ([]configv1alpha1.DependentObjectReference, error) {
diff --git a/pkg/resourceinterpreter/default/native/dependencies_test.go b/pkg/resourceinterpreter/default/native/dependencies_test.go
index d9d5d310f1d4..94b6dc57c342 100644
--- a/pkg/resourceinterpreter/default/native/dependencies_test.go
+++ b/pkg/resourceinterpreter/default/native/dependencies_test.go
@@ -861,6 +861,39 @@ func Test_getStatefulSetDependencies(t *testing.T) {
 			want:    testPairs[2].dependentObjectReference,
 			wantErr: false,
 		},
+		{
+			name:
"statefulset with partial dependencies 4", + object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "apps/v1", + "kind": "StatefulSet", + "metadata": map[string]interface{}{ + "name": "fake-statefulset", + "namespace": namespace, + }, + "spec": map[string]interface{}{ + "serviceName": "fake-service", + "selector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "app": "fake", + }, + }, + "template": map[string]interface{}{ + "spec": testPairs[0].podSpecsWithDependencies.Object, + }, + "volumeClaimTemplates": []interface{}{ + map[string]interface{}{ + "metadata": map[string]interface{}{ + "name": "test-pvc", + }, + }, + }, + }, + }, + }, + want: testPairs[0].dependentObjectReference[:3], // remove the pvc dependency because it was found in the volumeClaimTemplates + wantErr: false, + }, } for i := range tests { diff --git a/pkg/resourceinterpreter/default/native/retain_test.go b/pkg/resourceinterpreter/default/native/retain_test.go index a72bbe78483e..e15b4cf74c28 100644 --- a/pkg/resourceinterpreter/default/native/retain_test.go +++ b/pkg/resourceinterpreter/default/native/retain_test.go @@ -559,13 +559,13 @@ func Test_retainPodFields(t *testing.T) { }}, observed: &corev1.Pod{Spec: corev1.PodSpec{ NodeName: "node1", - ServiceAccountName: "fake-obersved-sa", + ServiceAccountName: "fake-observed-sa", Volumes: []corev1.Volume{ { - Name: "fake-obersved-volume", + Name: "fake-observed-volume", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: "fake-obersved-secret", + SecretName: "fake-observed-secret", }, }, }, @@ -612,13 +612,13 @@ func Test_retainPodFields(t *testing.T) { }, want: &corev1.Pod{Spec: corev1.PodSpec{ NodeName: "node1", - ServiceAccountName: "fake-obersved-sa", + ServiceAccountName: "fake-observed-sa", Volumes: []corev1.Volume{ { - Name: "fake-obersved-volume", + Name: "fake-observed-volume", VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ - SecretName: "fake-obersved-secret", + SecretName: "fake-observed-secret", }, }, }, diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml index e01bbde7d282..8e4dd65cd984 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/apps.kruise.io/v1alpha1/DaemonSet/customizations.yaml @@ -1,166 +1,166 @@ -apiVersion: config.karmada.io/v1alpha1 -kind: ResourceInterpreterCustomization -metadata: - name: declarative-configuration-daemonset -spec: - target: - apiVersion: apps.kruise.io/v1alpha1 - kind: DaemonSet - customizations: - statusAggregation: - luaScript: > - function AggregateStatus(desiredObj, statusItems) - if desiredObj.status == nil then - desiredObj.status = {} - end - if desiredObj.metadata.generation == nil then - desiredObj.metadata.generation = 0 - end - - if desiredObj.status.observedGeneration == nil then - desiredObj.status.observedGeneration = 0 - end - - -- Initialize status fields if status doest not exist - -- If the DaemonSet is not spread to any cluster, its status also should be aggregated - if statusItems == nil then - desiredObj.status.observedGeneration = desiredObj.metadata.generation - desiredObj.status.currentNumberScheduled = 0 - desiredObj.status.numberMisscheduled = 
0 - desiredObj.status.desiredNumberScheduled = 0 - desiredObj.status.numberReady = 0 - desiredObj.status.updatedNumberScheduled = 0 - desiredObj.status.numberAvailable = 0 - desiredObj.status.numberUnavailable = 0 - desiredObj.status.daemonSetHash = 0 - return desiredObj - end - - local generation = desiredObj.metadata.generation - local observedGeneration = desiredObj.status.observedGeneration - local currentNumberScheduled = 0 - local numberMisscheduled = 0 - local desiredNumberScheduled = 0 - local numberReady = 0 - local updatedNumberScheduled = 0 - local numberAvailable = 0 - local numberUnavailable = 0 - local daemonSetHash = 0 - - -- Count all members that their status is updated to the latest generation - local observedResourceTemplateGenerationCount = 0 - for i = 1, #statusItems do - if statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then - currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then - numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then - desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then - numberReady = numberReady + statusItems[i].status.numberReady - end - if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then - updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled - end - if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then - numberAvailable = numberAvailable + statusItems[i].status.numberAvailable - end - if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then - numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable - end - if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then - daemonSetHash = statusItems[i].status.daemonSetHash - end - - -- Check if the member's status is updated to the latest generation - local resourceTemplateGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then - resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration - end - local memberGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.generation ~= nil then - memberGeneration = statusItems[i].status.generation - end - local memberObservedGeneration = 0 - if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then - memberObservedGeneration = statusItems[i].status.observedGeneration - end - if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then - observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 - end - end - - -- Update the observed generation based on the observedResourceTemplateGenerationCount - if observedResourceTemplateGenerationCount == #statusItems then - desiredObj.status.observedGeneration = generation - else - desiredObj.status.observedGeneration = observedGeneration - end - - desiredObj.status.currentNumberScheduled = currentNumberScheduled - desiredObj.status.numberMisscheduled = 
numberMisscheduled - desiredObj.status.desiredNumberScheduled = desiredNumberScheduled - desiredObj.status.numberReady = numberReady - desiredObj.status.updatedNumberScheduled = updatedNumberScheduled - desiredObj.status.numberAvailable = numberAvailable - desiredObj.status.numberUnavailable = numberUnavailable - desiredObj.status.daemonSetHash = daemonSetHash - return desiredObj - end - statusReflection: - luaScript: > - function ReflectStatus(observedObj) - local status = {} - if observedObj == nil or observedObj.status == nil then - return status - end - status.observedGeneration = observedObj.status.observedGeneration - status.currentNumberScheduled = observedObj.status.currentNumberScheduled - status.numberMisscheduled = observedObj.status.numberMisscheduled - status.desiredNumberScheduled = observedObj.status.desiredNumberScheduled - status.numberReady = observedObj.status.numberReady - status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled - status.numberAvailable = observedObj.status.numberAvailable - status.numberUnavailable = observedObj.status.numberUnavailable - status.daemonSetHash = observedObj.status.daemonSetHash - - -- handle member resource generation report - if observedObj.metadata == nil then - return status - end - status.generation = observedObj.metadata.generation - - -- handle resource template generation report - if observedObj.metadata.annotations == nil then - return status - end - local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) - if resourceTemplateGeneration ~= nil then - status.resourceTemplateGeneration = resourceTemplateGeneration - end - return status - end - healthInterpretation: - luaScript: > - function InterpretHealth(observedObj) - if observedObj.status.observedGeneration ~= observedObj.metadata.generation then - return false - end - if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then - return false - end - if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then - return false - end - return true - end - dependencyInterpretation: - luaScript: > - local kube = require("kube") - function GetDependencies(desiredObj) - refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) - return refs - end +apiVersion: config.karmada.io/v1alpha1 +kind: ResourceInterpreterCustomization +metadata: + name: declarative-configuration-daemonset +spec: + target: + apiVersion: apps.kruise.io/v1alpha1 + kind: DaemonSet + customizations: + statusAggregation: + luaScript: > + function AggregateStatus(desiredObj, statusItems) + if desiredObj.status == nil then + desiredObj.status = {} + end + if desiredObj.metadata.generation == nil then + desiredObj.metadata.generation = 0 + end + + if desiredObj.status.observedGeneration == nil then + desiredObj.status.observedGeneration = 0 + end + + -- Initialize status fields if status doest not exist + -- If the DaemonSet is not spread to any cluster, its status also should be aggregated + if statusItems == nil then + desiredObj.status.observedGeneration = desiredObj.metadata.generation + desiredObj.status.currentNumberScheduled = 0 + desiredObj.status.numberMisscheduled = 0 + desiredObj.status.desiredNumberScheduled = 0 + desiredObj.status.numberReady = 0 + desiredObj.status.updatedNumberScheduled = 0 + desiredObj.status.numberAvailable = 0 + desiredObj.status.numberUnavailable = 0 + desiredObj.status.daemonSetHash = 0 + return desiredObj + end + 
+ local generation = desiredObj.metadata.generation + local observedGeneration = desiredObj.status.observedGeneration + local currentNumberScheduled = 0 + local numberMisscheduled = 0 + local desiredNumberScheduled = 0 + local numberReady = 0 + local updatedNumberScheduled = 0 + local numberAvailable = 0 + local numberUnavailable = 0 + local daemonSetHash = 0 + + -- Count all members that their status is updated to the latest generation + local observedResourceTemplateGenerationCount = 0 + for i = 1, #statusItems do + if statusItems[i].status ~= nil and statusItems[i].status.currentNumberScheduled ~= nil then + currentNumberScheduled = currentNumberScheduled + statusItems[i].status.currentNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberMisscheduled ~= nil then + numberMisscheduled = numberMisscheduled + statusItems[i].status.numberMisscheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.desiredNumberScheduled ~= nil then + desiredNumberScheduled = desiredNumberScheduled + statusItems[i].status.desiredNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberReady ~= nil then + numberReady = numberReady + statusItems[i].status.numberReady + end + if statusItems[i].status ~= nil and statusItems[i].status.updatedNumberScheduled ~= nil then + updatedNumberScheduled = updatedNumberScheduled + statusItems[i].status.updatedNumberScheduled + end + if statusItems[i].status ~= nil and statusItems[i].status.numberAvailable ~= nil then + numberAvailable = numberAvailable + statusItems[i].status.numberAvailable + end + if statusItems[i].status ~= nil and statusItems[i].status.numberUnavailable ~= nil then + numberUnavailable = numberUnavailable + statusItems[i].status.numberUnavailable + end + if statusItems[i].status ~= nil and statusItems[i].status.daemonSetHash ~= nil and statusItems[i].status.daemonSetHash ~= '' then + daemonSetHash = statusItems[i].status.daemonSetHash + end + + -- Check if the member's status is updated to the latest generation + local resourceTemplateGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.resourceTemplateGeneration ~= nil then + resourceTemplateGeneration = statusItems[i].status.resourceTemplateGeneration + end + local memberGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.generation ~= nil then + memberGeneration = statusItems[i].status.generation + end + local memberObservedGeneration = 0 + if statusItems[i].status ~= nil and statusItems[i].status.observedGeneration ~= nil then + memberObservedGeneration = statusItems[i].status.observedGeneration + end + if resourceTemplateGeneration == generation and memberGeneration == memberObservedGeneration then + observedResourceTemplateGenerationCount = observedResourceTemplateGenerationCount + 1 + end + end + + -- Update the observed generation based on the observedResourceTemplateGenerationCount + if observedResourceTemplateGenerationCount == #statusItems then + desiredObj.status.observedGeneration = generation + else + desiredObj.status.observedGeneration = observedGeneration + end + + desiredObj.status.currentNumberScheduled = currentNumberScheduled + desiredObj.status.numberMisscheduled = numberMisscheduled + desiredObj.status.desiredNumberScheduled = desiredNumberScheduled + desiredObj.status.numberReady = numberReady + desiredObj.status.updatedNumberScheduled = updatedNumberScheduled + desiredObj.status.numberAvailable = numberAvailable + desiredObj.status.numberUnavailable = 
numberUnavailable + desiredObj.status.daemonSetHash = daemonSetHash + return desiredObj + end + statusReflection: + luaScript: > + function ReflectStatus(observedObj) + local status = {} + if observedObj == nil or observedObj.status == nil then + return status + end + status.observedGeneration = observedObj.status.observedGeneration + status.currentNumberScheduled = observedObj.status.currentNumberScheduled + status.numberMisscheduled = observedObj.status.numberMisscheduled + status.desiredNumberScheduled = observedObj.status.desiredNumberScheduled + status.numberReady = observedObj.status.numberReady + status.updatedNumberScheduled = observedObj.status.updatedNumberScheduled + status.numberAvailable = observedObj.status.numberAvailable + status.numberUnavailable = observedObj.status.numberUnavailable + status.daemonSetHash = observedObj.status.daemonSetHash + + -- handle member resource generation report + if observedObj.metadata == nil then + return status + end + status.generation = observedObj.metadata.generation + + -- handle resource template generation report + if observedObj.metadata.annotations == nil then + return status + end + local resourceTemplateGeneration = tonumber(observedObj.metadata.annotations["resourcetemplate.karmada.io/generation"]) + if resourceTemplateGeneration ~= nil then + status.resourceTemplateGeneration = resourceTemplateGeneration + end + return status + end + healthInterpretation: + luaScript: > + function InterpretHealth(observedObj) + if observedObj.status.observedGeneration ~= observedObj.metadata.generation then + return false + end + if observedObj.status.updatedNumberScheduled < observedObj.status.desiredNumberScheduled then + return false + end + if observedObj.status.numberAvailable < observedObj.status.updatedNumberScheduled then + return false + end + return true + end + dependencyInterpretation: + luaScript: > + local kube = require("kube") + function GetDependencies(desiredObj) + refs = kube.getPodDependencies(desiredObj.spec.template, desiredObj.metadata.namespace) + return refs + end diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml index f69acb336662..3f6fe0b0be33 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/desired-policy.yaml @@ -1,20 +1,20 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." 
+ pattern: + metadata: + labels: + purpose: production diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml index b972ee9804c4..64050a5e4237 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/observed-policy.yaml @@ -1,86 +1,86 @@ -apiVersion: kyverno.io/v1 -kind: Policy -metadata: - name: sample - namespace: test-policy -spec: - validationFailureAction: Enforce - rules: - - name: require-pod-purpose-label - match: - any: - - resources: - kinds: - - Pod - validate: - message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." - pattern: - metadata: - labels: - purpose: production -status: - autogen: - rules: - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - resources: {} - mutate: {} - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: - resources: {} - generate: - clone: {} - cloneList: {} - match: - any: - - resources: - kinds: - - CronJob - resources: {} - mutate: {} - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on all - new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +apiVersion: kyverno.io/v1 +kind: Policy +metadata: + name: sample + namespace: test-policy +spec: + validationFailureAction: Enforce + rules: + - name: require-pod-purpose-label + match: + any: + - resources: + kinds: + - Pod + validate: + message: "You must have label `purpose` with value `production` set on all new Pod in test-policy Namespace." + pattern: + metadata: + labels: + purpose: production +status: + autogen: + rules: + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + resources: {} + mutate: {} + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: + resources: {} + generate: + clone: {} + cloneList: {} + match: + any: + - resources: + kinds: + - CronJob + resources: {} + mutate: {} + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on all + new Pod in test-policy Namespace. 
+ pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 diff --git a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml index 17cda3cd42db..0f16a7b8ce39 100644 --- a/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml +++ b/pkg/resourceinterpreter/default/thirdparty/resourcecustomizations/kyverno.io/v1/Policy/testdata/status-file.yaml @@ -1,119 +1,119 @@ -applied: true -clusterName: member2 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 ---- -applied: true -clusterName: member3 -health: Healthy -status: - autogen: - rules: - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - DaemonSet - - Deployment - - Job - - StatefulSet - - ReplicaSet - - ReplicationController - name: autogen-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - template: - metadata: - labels: - purpose: production - - exclude: [] - generate: [] - match: - any: - - resources: - kinds: - - CronJob - name: autogen-cronjob-require-pod-purpose-label - validate: - message: You must have label `purpose` with value `production` set on - all new Pod in test-policy Namespace. - pattern: - spec: - jobTemplate: - spec: - template: - metadata: - labels: - purpose: production - conditions: - - lastTransitionTime: "2023-05-07T09:19:06Z" - message: "" - reason: Succeeded - status: "True" - type: Ready - ready: true - rulecount: - generate: 0 - mutate: 0 - validate: 1 - verifyimages: 0 +applied: true +clusterName: member2 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. 
+ pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 +--- +applied: true +clusterName: member3 +health: Healthy +status: + autogen: + rules: + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - DaemonSet + - Deployment + - Job + - StatefulSet + - ReplicaSet + - ReplicationController + name: autogen-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + template: + metadata: + labels: + purpose: production + - exclude: [] + generate: [] + match: + any: + - resources: + kinds: + - CronJob + name: autogen-cronjob-require-pod-purpose-label + validate: + message: You must have label `purpose` with value `production` set on + all new Pod in test-policy Namespace. + pattern: + spec: + jobTemplate: + spec: + template: + metadata: + labels: + purpose: production + conditions: + - lastTransitionTime: "2023-05-07T09:19:06Z" + message: "" + reason: Succeeded + status: "True" + type: Ready + ready: true + rulecount: + generate: 0 + mutate: 0 + validate: 1 + verifyimages: 0 diff --git a/pkg/scheduler/cache/cache_test.go b/pkg/scheduler/cache/cache_test.go new file mode 100644 index 000000000000..8694bd693e00 --- /dev/null +++ b/pkg/scheduler/cache/cache_test.go @@ -0,0 +1,256 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/sets" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + clusterlister "github.com/karmada-io/karmada/pkg/generated/listers/cluster/v1alpha1" +) + +func TestNewCache(t *testing.T) { + tests := []struct { + name string + clusterLister clusterlister.ClusterLister + }{ + { + name: "Create cache with empty mock lister", + clusterLister: &mockClusterLister{}, + }, + { + name: "Create cache with non-empty mock lister", + clusterLister: &mockClusterLister{clusters: []*clusterv1alpha1.Cluster{{}}}, + }, + { + name: "Create cache with nil lister", + clusterLister: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := NewCache(tt.clusterLister) + + assert.NotNil(t, cache, "NewCache() returned nil cache") + + // Assert that the cache is of the correct type + sc, ok := cache.(*schedulerCache) + assert.True(t, ok, "NewCache() did not return a *schedulerCache") + + // Assert that the clusterLister is correctly set + assert.Equal(t, tt.clusterLister, sc.clusterLister, "clusterLister not set correctly") + }) + } +} + +func TestSnapshot(t *testing.T) { + tests := []struct { + name string + clusters []*clusterv1alpha1.Cluster + wantTotal int + wantReady int + wantReadyNames sets.Set[string] + }{ + { + name: "empty cluster list", + clusters: []*clusterv1alpha1.Cluster{}, + wantTotal: 0, + wantReady: 0, + wantReadyNames: sets.New[string](), + }, + { + name: "mixed ready and not ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + }, + wantTotal: 3, + wantReady: 2, + wantReadyNames: sets.New("cluster1", "cluster3"), + }, + { + name: "all ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionTrue}}}, + }, + }, + wantTotal: 2, + wantReady: 2, + wantReadyNames: sets.New("cluster1", "cluster2"), + }, + { + name: "all not ready clusters", + clusters: []*clusterv1alpha1.Cluster{ + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}, + Status: clusterv1alpha1.ClusterStatus{Conditions: []metav1.Condition{{Type: clusterv1alpha1.ClusterConditionReady, Status: metav1.ConditionFalse}}}, + }, + }, + wantTotal: 2, + 
wantReady: 0, + wantReadyNames: sets.New[string](), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockLister := &mockClusterLister{clusters: tt.clusters} + cache := NewCache(mockLister) + snapshot := cache.Snapshot() + + assert.Equal(t, tt.wantTotal, snapshot.NumOfClusters(), "Incorrect number of total clusters") + assert.Equal(t, tt.wantTotal, len(snapshot.GetClusters()), "Incorrect number of clusters returned by GetClusters()") + assert.Equal(t, tt.wantReady, len(snapshot.GetReadyClusters()), "Incorrect number of ready clusters") + assert.Equal(t, tt.wantReadyNames, snapshot.GetReadyClusterNames(), "Incorrect ready cluster names") + + // Test GetCluster for existing clusters + for _, cluster := range tt.clusters { + gotCluster := snapshot.GetCluster(cluster.Name) + assert.NotNil(t, gotCluster, "GetCluster(%s) returned nil", cluster.Name) + assert.Equal(t, cluster, gotCluster.Cluster(), "GetCluster(%s) returned incorrect cluster", cluster.Name) + } + + // Test GetCluster for non-existent cluster + assert.Nil(t, snapshot.GetCluster("non-existent-cluster"), "GetCluster(non-existent-cluster) should return nil") + + // Verify that the snapshot is a deep copy + for i, cluster := range tt.clusters { + snapshotCluster := snapshot.GetClusters()[i].Cluster() + assert.Equal(t, cluster, snapshotCluster, "Snapshot cluster should be equal to original") + } + }) + } +} + +func TestNewEmptySnapshot(t *testing.T) { + snapshot := NewEmptySnapshot() + assert.Equal(t, 0, snapshot.NumOfClusters(), "New empty snapshot should have 0 clusters") + assert.Empty(t, snapshot.GetClusters(), "New empty snapshot should return empty cluster list") + assert.Empty(t, snapshot.GetReadyClusters(), "New empty snapshot should return empty ready cluster list") + assert.Empty(t, snapshot.GetReadyClusterNames(), "New empty snapshot should return empty ready cluster names") + assert.Nil(t, snapshot.GetCluster("any-cluster"), "GetCluster on empty snapshot should return nil") +} + +func TestAddUpdateDeleteCluster(t *testing.T) { + tests := []struct { + name string + action func(*schedulerCache, *clusterv1alpha1.Cluster) + }{ + { + name: "AddCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.AddCluster(cluster) + }, + }, + { + name: "UpdateCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.UpdateCluster(cluster) + }, + }, + { + name: "DeleteCluster", + action: func(cache *schedulerCache, cluster *clusterv1alpha1.Cluster) { + cache.DeleteCluster(cluster) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockLister := &mockClusterLister{} + cache := NewCache(mockLister).(*schedulerCache) + + cluster := &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}, + } + + tt.action(cache, cluster) + + // Verify that the action doesn't modify the cache + assert.Empty(t, mockLister.clusters, "SchedulerCache.%s() modified the cache, which it shouldn't", tt.name) + }) + } +} + +func TestSnapshotError(t *testing.T) { + // mock error + mockError := errors.New("mock list error") + + mockLister := &mockClusterLister{ + clusters: nil, + err: mockError, + } + + cache := NewCache(mockLister) + + snapshot := cache.Snapshot() + + // Assert that the snapshot is empty + assert.Equal(t, 0, snapshot.NumOfClusters(), "Snapshot should be empty when there's an error") + assert.Empty(t, snapshot.GetClusters(), "Snapshot should have no clusters when there's an error") +} + +// Mock 
Implementations + +type mockClusterLister struct { + clusters []*clusterv1alpha1.Cluster + err error +} + +func (m *mockClusterLister) List(_ labels.Selector) ([]*clusterv1alpha1.Cluster, error) { + return m.clusters, m.err +} + +func (m *mockClusterLister) Get(name string) (*clusterv1alpha1.Cluster, error) { + if m.err != nil { + return nil, m.err + } + for _, cluster := range m.clusters { + if cluster.Name == name { + return cluster, nil + } + } + return nil, nil +} diff --git a/pkg/scheduler/core/common_test.go b/pkg/scheduler/core/common_test.go new file mode 100644 index 000000000000..26fee080292e --- /dev/null +++ b/pkg/scheduler/core/common_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package core + +import ( + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestSelectClusters(t *testing.T) { + tests := []struct { + name string + clustersScore framework.ClusterScoreList + placement *policyv1alpha1.Placement + spec *workv1alpha2.ResourceBindingSpec + expectedResult []*clusterv1alpha1.Cluster + expectedError bool + }{ + { + name: "select all clusters", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 10}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, Score: 20}, + }, + placement: &policyv1alpha1.Placement{}, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + }, + { + name: "select top 1 cluster", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 10}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, Score: 20}, + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, Score: 15}, + }, + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2", "cluster3"}, + }, + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + { + SpreadByField: policyv1alpha1.SpreadByFieldCluster, + MaxGroups: 1, + }, + }, + }, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + expectedError: false, + }, + { + name: "select clusters with affinity", + clustersScore: framework.ClusterScoreList{ + {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, Score: 10}, 
+ {Cluster: &clusterv1alpha1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, Score: 15}, + }, + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster3"}, + }, + }, + spec: &workv1alpha2.ResourceBindingSpec{}, + expectedResult: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster3"}}, + }, + expectedError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := SelectClusters(tt.clustersScore, tt.placement, tt.spec) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, len(tt.expectedResult), len(result)) + + expectedNames := make([]string, len(tt.expectedResult)) + for i, cluster := range tt.expectedResult { + expectedNames[i] = cluster.Name + } + + actualNames := make([]string, len(result)) + for i, cluster := range result { + actualNames[i] = cluster.Name + } + + assert.ElementsMatch(t, expectedNames, actualNames) + } + }) + } +} + +func TestAssignReplicas(t *testing.T) { + tests := []struct { + name string + clusters []*clusterv1alpha1.Cluster + spec *workv1alpha2.ResourceBindingSpec + status *workv1alpha2.ResourceBindingStatus + expectedResult []workv1alpha2.TargetCluster + expectedError bool + expectedErrMsg string + }{ + { + name: "Assign replicas to single cluster", + clusters: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 3, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + }, + }, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: []workv1alpha2.TargetCluster{{Name: "cluster1", Replicas: 3}}, + expectedError: false, + }, + { + name: "No clusters available", + clusters: []*clusterv1alpha1.Cluster{}, + spec: &workv1alpha2.ResourceBindingSpec{Replicas: 1}, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: nil, + expectedError: true, + expectedErrMsg: "no clusters available to schedule", + }, + { + name: "Non-workload scenario (zero replicas)", + clusters: []*clusterv1alpha1.Cluster{ + {ObjectMeta: metav1.ObjectMeta{Name: "cluster1"}}, + {ObjectMeta: metav1.ObjectMeta{Name: "cluster2"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 0, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: []workv1alpha2.TargetCluster{{Name: "cluster1"}, {Name: "cluster2"}}, + expectedError: false, + }, + { + name: "Unsupported replica scheduling strategy", + clusters: []*clusterv1alpha1.Cluster{ + {Spec: clusterv1alpha1.ClusterSpec{ID: "cluster1"}}, + }, + spec: &workv1alpha2.ResourceBindingSpec{ + Replicas: 3, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: "UnsupportedType", + ReplicaDivisionPreference: "UnsupportedPreference", + }, + }, + }, + status: &workv1alpha2.ResourceBindingStatus{}, + expectedResult: nil, + expectedError: true, + expectedErrMsg: "unsupported replica scheduling strategy", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := AssignReplicas(tt.clusters, tt.spec, tt.status) + + if tt.expectedError { + assert.Error(t, err) + if 
tt.expectedErrMsg != "" { + assert.Contains(t, err.Error(), tt.expectedErrMsg) + } + } else { + assert.NoError(t, err) + assert.Equal(t, len(tt.expectedResult), len(result)) + + // Check if the total assigned replicas match the spec + totalReplicas := int32(0) + for i, cluster := range result { + assert.Equal(t, tt.expectedResult[i].Name, cluster.Name) + assert.Equal(t, tt.expectedResult[i].Replicas, cluster.Replicas) + totalReplicas += cluster.Replicas + } + + if tt.spec.Replicas > 0 { + assert.Equal(t, tt.spec.Replicas, totalReplicas) + } + } + }) + } +} diff --git a/pkg/scheduler/core/spreadconstraint/group_clusters.go b/pkg/scheduler/core/spreadconstraint/group_clusters.go index 893239416855..03227407695a 100644 --- a/pkg/scheduler/core/spreadconstraint/group_clusters.go +++ b/pkg/scheduler/core/spreadconstraint/group_clusters.go @@ -17,6 +17,8 @@ limitations under the License. package spreadconstraint import ( + "math" + "k8s.io/utils/ptr" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" @@ -40,7 +42,7 @@ type GroupClustersInfo struct { // ProviderInfo indicate the provider information type ProviderInfo struct { Name string - Score int64 // the highest score in all clusters of the provider + Score int64 // the comprehensive score in all clusters of the provider AvailableReplicas int64 // Regions under this provider @@ -54,7 +56,7 @@ type ProviderInfo struct { // RegionInfo indicate the region information type RegionInfo struct { Name string - Score int64 // the highest score in all clusters of the region + Score int64 // the comprehensive score in all clusters of the region AvailableReplicas int64 // Zones under this provider @@ -66,7 +68,7 @@ type RegionInfo struct { // ZoneInfo indicate the zone information type ZoneInfo struct { Name string - Score int64 // the highest score in all clusters of the zone + Score int64 // the comprehensive score in all clusters of the zone AvailableReplicas int64 // Clusters under this zone, sorted by cluster.Score descending. @@ -109,9 +111,9 @@ func groupClustersBasedTopology( } groupClustersInfo.calAvailableReplicasFunc = calAvailableReplicasFunc groupClustersInfo.generateClustersInfo(clustersScore, rbSpec) - groupClustersInfo.generateZoneInfo(spreadConstraints) - groupClustersInfo.generateRegionInfo(spreadConstraints) - groupClustersInfo.generateProviderInfo(spreadConstraints) + groupClustersInfo.generateZoneInfo(spreadConstraints, rbSpec) + groupClustersInfo.generateRegionInfo(spreadConstraints, rbSpec) + groupClustersInfo.generateProviderInfo(spreadConstraints, rbSpec) return groupClustersInfo } @@ -128,6 +130,205 @@ func groupClustersIgnoringTopology( return groupClustersInfo } +// weightUnit is used to minimize the impact of avg(cluster.Score). +// By multiply the weightUnit, the unit of targetReplica will be 1000, and the avg(cluster.Score) will in [0, 100]. +// Thus, when sorting by Group Score, targetReplica will be considered first, and if the Weights are the same, then Score will be considered. +const weightUnit int64 = 1000 + +func (info *GroupClustersInfo) calcGroupScoreForDuplicate( + clusters []ClusterDetailInfo, + rbSpec *workv1alpha2.ResourceBindingSpec) int64 { + targetReplica := int64(rbSpec.Replicas) + var validClusters int64 + // validClusters is the number of clusters that have available replicas. 
+	var sumValidScore int64
+	for _, cluster := range clusters {
+		if cluster.AvailableReplicas >= targetReplica {
+			validClusters++
+			sumValidScore += cluster.Score
+		}
+	}
+
+	// Here is an example where rbSpec.Replicas == 50.
+
+	// Consider Group 1, which has five clusters as follows.
+	// -----------------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 | member5 |
+	// |---------------------------------------------------------------------|
+	// | score             |   100   |   100   |   100   |   100   |   100   |
+	// |---------------------------------------------------------------------|
+	// | AvailableReplicas |    60   |    70   |    40   |    30   |    10   |
+	// -----------------------------------------------------------------------
+
+	// And Group 2, which has four clusters as follows.
+	// -------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 |
+	// |-----------------------------------------------------------|
+	// | score             |    0    |    0    |    0    |    0    |
+	// |-----------------------------------------------------------|
+	// | AvailableReplicas |    60   |    60   |    60   |    60   |
+	// -------------------------------------------------------------
+
+	// According to our expectations, Group 2 is the better choice: it has more clusters
+	// whose available replicas meet the replica requirement. Although the average
+	// Cluster.Score in Group 1 is higher, under the Duplicated replica scheduling
+	// strategy we prioritize how many clusters can each hold all the replicas.
+	// Based on our algorithm, the score for Group 2 is also higher than that of Group 1.
+
+	// Group1's Score = 2 * 1000 + 100 = 2100
+	// Group2's Score = 4 * 1000 + 0 = 4000
+
+	// Here is another example, again with rbSpec.Replicas == 50.
+
+	// Consider Group 1, which has five clusters as follows.
+	// -----------------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 | member5 |
+	// |---------------------------------------------------------------------|
+	// | score             |   100   |   100   |   100   |   100   |   100   |
+	// |---------------------------------------------------------------------|
+	// | AvailableReplicas |    60   |    70   |    10   |    10   |    5    |
+	// -----------------------------------------------------------------------
+
+	// And Group 2, which has four clusters as follows.
+	// -------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 |
+	// |-----------------------------------------------------------|
+	// | score             |    0    |    0    |    0    |    0    |
+	// |-----------------------------------------------------------|
+	// | AvailableReplicas |   100   |   100   |    10   |    10   |
+	// -------------------------------------------------------------
+
+	// According to our expectations, Group 1 is the better choice this time. Both
+	// groups have the same number of clusters that meet the replica requirement,
+	// but the average Cluster.Score in Group 1 is higher.
+	// Based on our algorithm, the score for Group 1 is also higher than that of Group 2.
+
+	// Group1's Score = 2 * 1000 + 100 = 2100
+	// Group2's Score = 2 * 1000 + 0 = 2000
+
+	// The cluster count (validClusters) takes priority over sumValidScore.
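+	// Note: validClusters can be zero when no cluster can hold all the replicas on its
+	// own; such a group cannot satisfy a duplicated placement at all, so it is scored
+	// lowest instead of computing the average below.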
+	weightedValidClusters := validClusters * weightUnit
+	return weightedValidClusters + sumValidScore/validClusters
+}
+
+func (info *GroupClustersInfo) calcGroupScore(
+	clusters []ClusterDetailInfo,
+	rbSpec *workv1alpha2.ResourceBindingSpec,
+	minGroups int) int64 {
+	if rbSpec.Placement == nil || rbSpec.Placement.ReplicaSchedulingType() == policyv1alpha1.ReplicaSchedulingTypeDuplicated {
+		// if the replica scheduling type is duplicated, the score is calculated by calcGroupScoreForDuplicate.
+		return info.calcGroupScoreForDuplicate(clusters, rbSpec)
+	}
+
+	// if the replica scheduling type is divided, the score is calculated as follows.
+	float64MinGroups := float64(minGroups)
+	targetReplica := int64(math.Ceil(float64(rbSpec.Replicas) / float64MinGroups))
+
+	// get the minGroups of Cluster.
+	var clusterMinGroups int
+	if rbSpec.Placement.SpreadConstraints != nil {
+		for _, sc := range rbSpec.Placement.SpreadConstraints {
+			if sc.SpreadByField == policyv1alpha1.SpreadByFieldCluster {
+				clusterMinGroups = sc.MinGroups
+			}
+		}
+	}
+
+	// if the minGroups of Cluster is less than the minGroups of the group, raise it to the group's minGroups.
+	if clusterMinGroups < minGroups {
+		clusterMinGroups = minGroups
+	}
+	int64ClusterMinGroups := int64(clusterMinGroups)
+
+	// clusters have been sorted by cluster.Score descending,
+	// and if the cluster.Score is the same, by cluster.AvailableReplicas ascending.
+	var sumAvailableReplica int64
+	var sumScore int64
+	var validClusters int64
+	for _, cluster := range clusters {
+		sumAvailableReplica += cluster.AvailableReplicas
+		sumScore += cluster.Score
+		validClusters++
+		if validClusters >= int64ClusterMinGroups && sumAvailableReplica >= targetReplica {
+			break
+		}
+	}
+
+	// cluster.Score is 0 or 100. To minimize the impact of Score,
+	// targetReplica is weighted by weightUnit (1000). This way,
+	// when sorting by Group Score, targetReplica is considered first,
+	// and Score only breaks ties between equal weighted parts.
+
+	// Here is an example, with rbSpec.Replicas == 100, Group.minGroups == 2 and Cluster.minGroups == 1.
+	// Thus targetReplica is 50, and int64ClusterMinGroups == 2, because int64ClusterMinGroups == max(Group.minGroups, Cluster.minGroups).
+
+	// Group 1 has five clusters as follows.
+	// ---------------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 | member5 |
+	// |-------------------|---------|---------|---------|---------|---------|
+	// | score             | 100     | 100     | 100     | 100     | 100     |
+	// | AvailableReplicas | 10      | 10      | 10      | 10      | 5       |
+	// ---------------------------------------------------------------------
+
+	// Group 2 has four clusters as follows.
+	// -----------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 |
+	// |-------------------|---------|---------|---------|---------|
+	// | score             | 0       | 0       | 0       | 0       |
+	// | AvailableReplicas | 40      | 30      | 10      | 10      |
+	// -----------------------------------------------------------
+
+	// According to our expectations, Group 2 is a more ideal choice
+	// than Group 1 because Group 2 has more available replica capacity,
+	// which meets the needs of replica allocation, even though Group 1's
+	// clusters have higher scores.
+	// Based on our algorithm, Group 2's Score is also higher than that of Group 1.
+
+	// Group1's Score = 45 * 1000 + 100 = 45100
+	// Group2's Score = 50 * 1000 + 0 = 50000
+
+	// Here is another example; targetReplica is still 50 and int64ClusterMinGroups == 2.
+	// The difference is that both groups now meet the available replica capacity requirement.
+
+	// Group 1 has five clusters as follows.
+	// ---------------------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 | member5 |
+	// |-------------------|---------|---------|---------|---------|---------|
+	// | score             | 100     | 100     | 100     | 100     | 100     |
+	// | AvailableReplicas | 40      | 40      | 10      | 10      | 5       |
+	// ---------------------------------------------------------------------
+
+	// Group 2 has four clusters as follows.
+	// -----------------------------------------------------------
+	// | clusterName       | member1 | member2 | member3 | member4 |
+	// |-------------------|---------|---------|---------|---------|
+	// | score             | 0       | 0       | 0       | 0       |
+	// | AvailableReplicas | 100     | 100     | 10      | 10      |
+	// -----------------------------------------------------------
+
+	// According to our expectations, Group 1 is a more ideal choice than Group 2,
+	// as both groups can now meet the replica allocation requirements.
+	// However, Group 1's clusters have higher scores (even though Group 2 has more available replicas).
+	// Based on our algorithm, the Score for Group 1 is also higher than that of Group 2.
+
+	// Group1's Score = 50 * 1000 + 100 = 50100
+	// Group2's Score = 50 * 1000 + 0 = 50000
+
+	if sumAvailableReplica < targetReplica {
+		sumAvailableReplica = sumAvailableReplica * weightUnit
+		return sumAvailableReplica + sumScore/int64(len(clusters))
+	}
+
+	targetReplica = targetReplica * weightUnit
+	return targetReplica + sumScore/validClusters
+}
+
 func (info *GroupClustersInfo) generateClustersInfo(clustersScore framework.ClusterScoreList, rbSpec *workv1alpha2.ResourceBindingSpec) {
 	var clusters []*clusterv1alpha1.Cluster
 	for _, clusterScore := range clustersScore {
@@ -153,7 +354,7 @@ func (info *GroupClustersInfo) generateClustersInfo(clustersScore framework.Clus
 	})
 }
 
-func (info *GroupClustersInfo) generateZoneInfo(spreadConstraints []policyv1alpha1.SpreadConstraint) {
+func (info *GroupClustersInfo) generateZoneInfo(spreadConstraints []policyv1alpha1.SpreadConstraint, rbSpec *workv1alpha2.ResourceBindingSpec) {
 	if !IsSpreadConstraintExisted(spreadConstraints, policyv1alpha1.SpreadByFieldZone) {
 		return
 	}
@@ -178,13 +379,20 @@ func (info *GroupClustersInfo) generateZoneInfo(spreadConstraints []policyv1alph
 		}
 	}
 
+	var minGroups int
+	for _, sc := range spreadConstraints {
+		if sc.SpreadByField == policyv1alpha1.SpreadByFieldZone {
+			minGroups = sc.MinGroups
+		}
+	}
+
 	for zone, zoneInfo := range info.Zones {
-		zoneInfo.Score = zoneInfo.Clusters[0].Score
+		zoneInfo.Score = info.calcGroupScore(zoneInfo.Clusters, rbSpec, minGroups)
 		info.Zones[zone] = zoneInfo
 	}
 }
 
-func (info *GroupClustersInfo) generateRegionInfo(spreadConstraints []policyv1alpha1.SpreadConstraint) {
+func (info *GroupClustersInfo) generateRegionInfo(spreadConstraints []policyv1alpha1.SpreadConstraint, rbSpec *workv1alpha2.ResourceBindingSpec) {
 	if !IsSpreadConstraintExisted(spreadConstraints,
policyv1alpha1.SpreadByFieldRegion) { return } @@ -212,13 +420,20 @@ func (info *GroupClustersInfo) generateRegionInfo(spreadConstraints []policyv1al info.Regions[region] = regionInfo } + var minGroups int + for _, sc := range spreadConstraints { + if sc.SpreadByField == policyv1alpha1.SpreadByFieldRegion { + minGroups = sc.MinGroups + } + } + for region, regionInfo := range info.Regions { - regionInfo.Score = regionInfo.Clusters[0].Score + regionInfo.Score = info.calcGroupScore(regionInfo.Clusters, rbSpec, minGroups) info.Regions[region] = regionInfo } } -func (info *GroupClustersInfo) generateProviderInfo(spreadConstraints []policyv1alpha1.SpreadConstraint) { +func (info *GroupClustersInfo) generateProviderInfo(spreadConstraints []policyv1alpha1.SpreadConstraint, rbSpec *workv1alpha2.ResourceBindingSpec) { if !IsSpreadConstraintExisted(spreadConstraints, policyv1alpha1.SpreadByFieldProvider) { return } @@ -252,8 +467,15 @@ func (info *GroupClustersInfo) generateProviderInfo(spreadConstraints []policyv1 info.Providers[provider] = providerInfo } + var minGroups int + for _, sc := range spreadConstraints { + if sc.SpreadByField == policyv1alpha1.SpreadByFieldProvider { + minGroups = sc.MinGroups + } + } + for provider, providerInfo := range info.Providers { - providerInfo.Score = providerInfo.Clusters[0].Score + providerInfo.Score = info.calcGroupScore(providerInfo.Clusters, rbSpec, minGroups) info.Providers[provider] = providerInfo } } diff --git a/pkg/scheduler/core/spreadconstraint/group_clusters_test.go b/pkg/scheduler/core/spreadconstraint/group_clusters_test.go index fb1c8a6b4878..12773f0e5796 100644 --- a/pkg/scheduler/core/spreadconstraint/group_clusters_test.go +++ b/pkg/scheduler/core/spreadconstraint/group_clusters_test.go @@ -17,6 +17,7 @@ limitations under the License. 
 package spreadconstraint
 
 import (
+	"fmt"
 	"testing"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -219,3 +220,161 @@ func Test_GroupClustersWithScore(t *testing.T) {
 		})
 	}
 }
+
+var duplicated = "duplicated"
+var aggregated = "aggregated"
+var dynamicWeight = "dynamicWeight"
+var staticWeight = "staticWeight"
+
+type RbSpecMap map[string]*workv1alpha2.ResourceBindingSpec
+
+func generateRbSpec(replica int32) RbSpecMap {
+	rbspecDuplicated := &workv1alpha2.ResourceBindingSpec{
+		Replicas: replica,
+		Placement: &policyv1alpha1.Placement{
+			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+				ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDuplicated,
+			},
+		},
+	}
+	rbspecAggregated := &workv1alpha2.ResourceBindingSpec{
+		Replicas: replica,
+		Placement: &policyv1alpha1.Placement{
+			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+				ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+				ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
+			},
+		},
+	}
+	rbspecDynamicWeight := &workv1alpha2.ResourceBindingSpec{
+		Replicas: replica,
+		Placement: &policyv1alpha1.Placement{
+			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+				ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+				ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
+				WeightPreference: &policyv1alpha1.ClusterPreferences{
+					DynamicWeight: policyv1alpha1.DynamicWeightByAvailableReplicas,
+				},
+			},
+		},
+	}
+	rbspecStaticWeight := &workv1alpha2.ResourceBindingSpec{
+		Replicas: replica,
+		Placement: &policyv1alpha1.Placement{
+			ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{
+				ReplicaSchedulingType:     policyv1alpha1.ReplicaSchedulingTypeDivided,
+				ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted,
+				WeightPreference: &policyv1alpha1.ClusterPreferences{
+					StaticWeightList: []policyv1alpha1.StaticClusterWeight{},
+				},
+			},
+		},
+	}
+	return RbSpecMap{
+		"duplicated":    rbspecDuplicated,
+		"aggregated":    rbspecAggregated,
+		"dynamicWeight": rbspecDynamicWeight,
+		"staticWeight":  rbspecStaticWeight,
+	}
+}
+
+func generateClusterScores(n int, scores []int64, replicas []int64) []ClusterDetailInfo {
+	info := make([]ClusterDetailInfo, n)
+	for i := 0; i < n; i++ {
+		info[i] = ClusterDetailInfo{
+			Name:              fmt.Sprintf("member%d", i+1),
+			Score:             scores[i],
+			AvailableReplicas: replicas[i],
+		}
+	}
+	return info
+}
+
+type GroupScoreArgs struct {
+	id          int
+	clusters1   []ClusterDetailInfo
+	clusters2   []ClusterDetailInfo
+	rbSpec      *workv1alpha2.ResourceBindingSpec
+	minGroups   int
+	group1Wins  bool
+	description string
+}
+
+func generateArgs() []GroupScoreArgs {
+	argsList := []GroupScoreArgs{
+		{
+			id:          1,
+			clusters1:   generateClusterScores(5, []int64{100, 100, 0, 0, 0}, []int64{30, 30, 30, 30, 30}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{30, 30, 30, 10, 10}),
+			rbSpec:      generateRbSpec(20)[duplicated],
+			group1Wins:  true,
+			description: "clusters1 is better than clusters2, because clusters1 meets the replica requirement in a larger number of clusters.",
+		},
+		{
+			id:          2,
+			clusters1:   generateClusterScores(5, []int64{100, 100, 0, 0, 0}, []int64{30, 30, 30, 10, 10}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{30, 30, 30, 10, 10}),
+			rbSpec:      generateRbSpec(20)[duplicated],
+			group1Wins:  false,
+			description: "clusters2 is better than clusters1, because clusters1 and clusters2 meet the replica requirement in the same number of clusters, but clusters2 has a higher score.",
+		},
+		{
+			id:          3,
+			clusters1:   generateClusterScores(5, []int64{100, 100, 0, 0, 0}, []int64{30, 30, 30, 10, 10}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{10, 10, 10, 5, 5}),
+			rbSpec:      generateRbSpec(100)[aggregated],
+			minGroups:   2,
+			group1Wins:  true,
+			description: "clusters1 is better than clusters2, because clusters1 meets the replica requirement but clusters2 does not.",
+		},
+		{
+			id:          4,
+			clusters1:   generateClusterScores(5, []int64{100, 100, 0, 0, 0}, []int64{10, 10, 10, 10, 5}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{10, 10, 10, 5, 5}),
+			rbSpec:      generateRbSpec(100)[dynamicWeight],
+			minGroups:   2,
+			group1Wins:  true,
+			description: "clusters1 is better than clusters2, because clusters1 has more available replicas than clusters2.",
+		},
+		{
+			id:          5,
+			clusters1:   generateClusterScores(5, []int64{100, 100, 0, 0, 0}, []int64{10, 10, 10, 5, 5}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{10, 10, 10, 5, 5}),
+			rbSpec:      generateRbSpec(100)[staticWeight],
+			minGroups:   2,
+			group1Wins:  false,
+			description: "clusters2 is better than clusters1, because clusters2's score is higher than clusters1's.",
+		},
+		{
+			id:          6,
+			clusters1:   generateClusterScores(5, []int64{0, 0, 0, 0, 0}, []int64{100, 100, 100, 100, 100}),
+			clusters2:   generateClusterScores(5, []int64{100, 100, 100, 100, 100}, []int64{50, 50, 50, 50, 50}),
+			rbSpec:      generateRbSpec(100)[aggregated],
+			minGroups:   2,
+			group1Wins:  false,
+			description: "clusters2 is better than clusters1, because clusters2's score is higher, although clusters2 has fewer available replicas than clusters1.",
+		},
+	}
+
+	return argsList
+}
+
+func Test_CalcGroupScore(t *testing.T) {
+	tests := generateArgs()
+	groupClustersInfo := &GroupClustersInfo{
+		Providers: make(map[string]ProviderInfo),
+		Regions:   make(map[string]RegionInfo),
+		Zones:     make(map[string]ZoneInfo),
+	}
+
+	for _, tt := range tests {
+		t.Run(fmt.Sprintf("case-%d", tt.id), func(t *testing.T) {
+			score1 := groupClustersInfo.calcGroupScore(tt.clusters1, tt.rbSpec, tt.minGroups)
+			score2 := groupClustersInfo.calcGroupScore(tt.clusters2, tt.rbSpec, tt.minGroups)
+			t.Logf("test ID: %v, score1 = %v, score2 = %v, score1 >= score2: %v, description => %v", tt.id, score1, score2, score1 >= score2, tt.description)
+			if tt.group1Wins != (score1 >= score2) {
+				t.Errorf("test ID: %v, score1 = %v, score2 = %v, want score1 >= score2 to be %v, but got %v", tt.id, score1, score2, tt.group1Wins, score1 >= score2)
+			}
+		})
+	}
+}
diff --git a/pkg/scheduler/event_handler_test.go b/pkg/scheduler/event_handler_test.go
new file mode 100644
index 000000000000..fbf7088aeb7c
--- /dev/null
+++ b/pkg/scheduler/event_handler_test.go
@@ -0,0 +1,453 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package scheduler + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/cache" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +func TestResourceBindingEventFilter(t *testing.T) { + testCases := []struct { + name string + schedulerName string + obj interface{} + expectedResult bool + }{ + { + name: "ResourceBinding: Matching scheduler name, no labels", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", nil), + expectedResult: false, + }, + { + name: "ResourceBinding: Non-matching scheduler name", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "other-scheduler", nil), + expectedResult: false, + }, + { + name: "ResourceBinding: Matching scheduler name, with PropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with ClusterPropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with BindingManagedByLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + workv1alpha2.BindingManagedByLabel: "test-manager", + }), + expectedResult: true, + }, + { + name: "ResourceBinding: Matching scheduler name, with empty PropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createResourceBinding("test-rb", "test-scheduler", map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "", + }), + expectedResult: false, + }, + { + name: "ClusterResourceBinding: Matching scheduler name, no labels", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "test-scheduler", nil), + expectedResult: false, + }, + { + name: "ClusterResourceBinding: Non-matching scheduler name", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "other-scheduler", nil), + expectedResult: false, + }, + { + name: "ClusterResourceBinding: Matching scheduler name, with ClusterPropagationPolicyPermanentIDLabel", + schedulerName: "test-scheduler", + obj: createClusterResourceBinding("test-crb", "test-scheduler", map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "test-id", + }), + expectedResult: true, + }, + { + name: "Nil object", + schedulerName: "test-scheduler", + obj: nil, + expectedResult: false, + }, + { + name: "Invalid object type", + schedulerName: "test-scheduler", + obj: "not-a-valid-object", + expectedResult: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + s := &Scheduler{ + schedulerName: tc.schedulerName, + } + result := s.resourceBindingEventFilter(tc.obj) + assert.Equal(t, tc.expectedResult, result, "Test case: %s", tc.name) + }) + } +} + +func TestAddCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator 
bool + obj interface{} + expectedAdded bool + expectedClusterName string + }{ + { + name: "valid cluster object with estimator enabled", + enableSchedulerEstimator: true, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "valid cluster object with estimator disabled", + enableSchedulerEstimator: false, + obj: createCluster("test-cluster-2", 0, nil), + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "invalid object type", + enableSchedulerEstimator: true, + obj: &corev1.Pod{}, + expectedAdded: false, + expectedClusterName: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + mockWorker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: mockWorker, + } + + s.addCluster(tt.obj) + + if tt.expectedAdded { + assert.Equal(t, 1, mockWorker.addCount, "Worker Add should have been called once") + assert.Equal(t, tt.expectedClusterName, mockWorker.lastAdded, "Incorrect cluster name added") + } else { + assert.Equal(t, 0, mockWorker.addCount, "Worker Add should not have been called") + assert.Nil(t, mockWorker.lastAdded, "No cluster name should have been added") + } + + assert.Equal(t, 0, mockWorker.enqueueCount, "Worker Enqueue should not have been called") + assert.Nil(t, mockWorker.lastEnqueued, "No item should have been enqueued") + }) + } +} + +func TestUpdateCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator bool + oldObj interface{} + newObj interface{} + expectedEstimatorAdded bool + expectedReconcileAdded int + }{ + { + name: "valid cluster update with generation change", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 1, nil), + newObj: createCluster("test-cluster", 2, nil), + expectedEstimatorAdded: true, + expectedReconcileAdded: 2, + }, + { + name: "valid cluster update with label change", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, map[string]string{"old": "label"}), + newObj: createCluster("test-cluster", 0, map[string]string{"new": "label"}), + expectedEstimatorAdded: true, + expectedReconcileAdded: 2, + }, + { + name: "valid cluster update without changes", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, nil), + newObj: createCluster("test-cluster", 0, nil), + expectedEstimatorAdded: true, + expectedReconcileAdded: 0, + }, + { + name: "invalid old object type", + enableSchedulerEstimator: true, + oldObj: &corev1.Pod{}, + newObj: createCluster("test-cluster", 0, nil), + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + { + name: "invalid new object type", + enableSchedulerEstimator: true, + oldObj: createCluster("test-cluster", 0, nil), + newObj: &corev1.Pod{}, + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + { + name: "both objects invalid", + enableSchedulerEstimator: true, + oldObj: &corev1.Pod{}, + newObj: &corev1.Pod{}, + expectedEstimatorAdded: false, + expectedReconcileAdded: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + estimatorWorker := &mockAsyncWorker{} + reconcileWorker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: estimatorWorker, + clusterReconcileWorker: reconcileWorker, + } + + s.updateCluster(tt.oldObj, tt.newObj) + + // Check schedulerEstimatorWorker + if tt.expectedEstimatorAdded { + assert.Equal(t, 
1, estimatorWorker.addCount, "Estimator worker Add should have been called once") + if cluster, ok := tt.newObj.(*clusterv1alpha1.Cluster); ok { + assert.Equal(t, cluster.Name, estimatorWorker.lastAdded, "Incorrect cluster name added to estimator worker") + } else { + t.Errorf("Expected newObj to be a Cluster, but it wasn't") + } + } else { + assert.Equal(t, 0, estimatorWorker.addCount, "Estimator worker Add should not have been called") + assert.Nil(t, estimatorWorker.lastAdded, "No cluster should have been added to estimator worker") + } + + // Check clusterReconcileWorker + assert.Equal(t, tt.expectedReconcileAdded, reconcileWorker.addCount, "Reconcile worker Add called unexpected number of times") + + if tt.expectedReconcileAdded > 0 { + lastAdded, ok := reconcileWorker.lastAdded.(*clusterv1alpha1.Cluster) + assert.True(t, ok, "Last added item is not a Cluster object") + if ok { + newCluster, newOk := tt.newObj.(*clusterv1alpha1.Cluster) + assert.True(t, newOk, "newObj is not a Cluster object") + if newOk { + assert.Equal(t, newCluster.Name, lastAdded.Name, "Incorrect cluster added to reconcile worker") + } + } + } else { + assert.Nil(t, reconcileWorker.lastAdded, "No cluster should have been added to reconcile worker") + } + }) + } +} + +func TestDeleteCluster(t *testing.T) { + tests := []struct { + name string + enableSchedulerEstimator bool + obj interface{} + expectedAdded bool + expectedClusterName string + }{ + { + name: "valid cluster object with estimator enabled", + enableSchedulerEstimator: true, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "valid cluster object with estimator disabled", + enableSchedulerEstimator: false, + obj: createCluster("test-cluster", 0, nil), + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "deleted final state unknown with valid cluster", + enableSchedulerEstimator: true, + obj: cache.DeletedFinalStateUnknown{ + Key: "test-cluster", + Obj: createCluster("test-cluster", 0, nil), + }, + expectedAdded: true, + expectedClusterName: "test-cluster", + }, + { + name: "deleted final state unknown with invalid object", + enableSchedulerEstimator: true, + obj: cache.DeletedFinalStateUnknown{ + Key: "test-pod", + Obj: &corev1.Pod{}, + }, + expectedAdded: false, + expectedClusterName: "", + }, + { + name: "invalid object type", + enableSchedulerEstimator: true, + obj: &corev1.Pod{}, + expectedAdded: false, + expectedClusterName: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + worker := &mockAsyncWorker{} + s := &Scheduler{ + enableSchedulerEstimator: tt.enableSchedulerEstimator, + schedulerEstimatorWorker: worker, + } + + s.deleteCluster(tt.obj) + + if tt.expectedAdded { + assert.Equal(t, 1, worker.addCount, "Worker Add should have been called once") + assert.Equal(t, tt.expectedClusterName, worker.lastAdded, "Incorrect cluster name added to worker") + } else { + assert.Equal(t, 0, worker.addCount, "Worker Add should not have been called") + assert.Nil(t, worker.lastAdded, "No cluster name should have been added") + } + }) + } +} + +func TestSchedulerNameFilter(t *testing.T) { + tests := []struct { + name string + schedulerNameFromOptions string + schedulerName string + expected bool + }{ + { + name: "matching scheduler names", + schedulerNameFromOptions: "test-scheduler", + schedulerName: "test-scheduler", + expected: true, + }, + { + name: "non-matching scheduler names", + schedulerNameFromOptions: "test-scheduler", + 
schedulerName: "other-scheduler", + expected: false, + }, + { + name: "empty scheduler name defaults to DefaultScheduler", + schedulerNameFromOptions: DefaultScheduler, + schedulerName: "", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := schedulerNameFilter(tt.schedulerNameFromOptions, tt.schedulerName) + assert.Equal(t, tt.expected, result) + }) + } +} + +// Helper functions + +func createCluster(name string, generation int64, labels map[string]string) *clusterv1alpha1.Cluster { + return &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Generation: generation, + Labels: labels, + }, + } +} + +func createResourceBinding(name, schedulerName string, labels map[string]string) *workv1alpha2.ResourceBinding { + return &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + SchedulerName: schedulerName, + }, + } +} + +func createClusterResourceBinding(name, schedulerName string, labels map[string]string) *workv1alpha2.ClusterResourceBinding { + return &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + SchedulerName: schedulerName, + }, + } +} + +// Mock Implementations + +// mockAsyncWorker is a mock implementation of util.AsyncWorker +type mockAsyncWorker struct { + addCount int + enqueueCount int + lastAdded interface{} + lastEnqueued interface{} +} + +func (m *mockAsyncWorker) Add(item interface{}) { + m.addCount++ + m.lastAdded = item +} + +func (m *mockAsyncWorker) Enqueue(item interface{}) { + m.enqueueCount++ + m.lastEnqueued = item +} + +func (m *mockAsyncWorker) AddAfter(_ interface{}, _ time.Duration) {} + +func (m *mockAsyncWorker) Run(_ int, _ <-chan struct{}) {} diff --git a/pkg/scheduler/framework/interface.go b/pkg/scheduler/framework/interface.go index 7f660589e7da..674a558676a2 100644 --- a/pkg/scheduler/framework/interface.go +++ b/pkg/scheduler/framework/interface.go @@ -83,7 +83,7 @@ const ( // NOTE: A nil status is also considered as "Success". Success Code = iota // Unschedulable is used when a plugin finds the resource unschedulable. - // The accompanying status message should explain why the it is unschedulable. + // The accompanying status message should explain why it is unschedulable. Unschedulable // Error is used for internal plugin errors, unexpected input, etc. Error diff --git a/pkg/scheduler/framework/interface_test.go b/pkg/scheduler/framework/interface_test.go new file mode 100644 index 000000000000..6c4837568c0e --- /dev/null +++ b/pkg/scheduler/framework/interface_test.go @@ -0,0 +1,260 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package framework + +import ( + "errors" + "sort" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPluginToResult_Merge(t *testing.T) { + tests := []struct { + name string + results PluginToResult + want *Result + }{ + { + name: "empty results", + results: PluginToResult{}, + want: nil, + }, + { + name: "all success results", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Success), + }, + want: NewResult(Success), + }, + { + name: "mixed results with unschedulable", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Unschedulable, "reason1"), + "plugin3": NewResult(Success), + }, + want: NewResult(Unschedulable, "reason1"), + }, + { + name: "mixed results with error", + results: PluginToResult{ + "plugin1": NewResult(Success), + "plugin2": NewResult(Unschedulable, "reason1"), + "plugin3": NewResult(Error, "error occurred"), + }, + want: NewResult(Error, "reason1", "error occurred"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.results.Merge() + if tt.want == nil { + assert.Nil(t, got) + } else { + assert.NotNil(t, got) + assert.Equal(t, tt.want.code, got.code) + + // Sort the reasons before comparing + sortedWantReasons := make([]string, len(tt.want.reasons)) + copy(sortedWantReasons, tt.want.reasons) + sort.Strings(sortedWantReasons) + + sortedGotReasons := make([]string, len(got.reasons)) + copy(sortedGotReasons, got.reasons) + sort.Strings(sortedGotReasons) + + assert.Equal(t, sortedWantReasons, sortedGotReasons) + + if tt.want.err != nil { + assert.Error(t, got.err) + } else { + assert.NoError(t, got.err) + } + } + }) + } +} + +func TestResult_IsSuccess(t *testing.T) { + tests := []struct { + name string + result *Result + want bool + }{ + { + name: "nil result", + result: nil, + want: true, + }, + { + name: "success result", + result: NewResult(Success), + want: true, + }, + { + name: "unschedulable result", + result: NewResult(Unschedulable), + want: false, + }, + { + name: "error result", + result: NewResult(Error), + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, tt.want, tt.result.IsSuccess()) + }) + } +} + +func TestResult_AsError(t *testing.T) { + tests := []struct { + name string + result *Result + wantErr bool + errorMsg string + }{ + { + name: "success result", + result: NewResult(Success), + wantErr: false, + }, + { + name: "unschedulable result", + result: NewResult(Unschedulable, "reason1", "reason2"), + wantErr: true, + errorMsg: "reason1, reason2", + }, + { + name: "error result", + result: NewResult(Error, "error occurred"), + wantErr: true, + errorMsg: "error occurred", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.result.AsError() + if tt.wantErr { + assert.Error(t, err) + assert.Equal(t, tt.errorMsg, err.Error()) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestResult_AsResult(t *testing.T) { + tests := []struct { + name string + err error + wantCode Code + wantReasons []string + }{ + { + name: "non-nil error", + err: errors.New("test error"), + wantCode: Error, + wantReasons: []string{"test error"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := AsResult(tt.err) + assert.Equal(t, tt.wantCode, got.code) + assert.Equal(t, tt.wantReasons, got.reasons) + assert.Equal(t, tt.err, got.err) + }) + } +} + +func TestResult_Code(t *testing.T) { + tests := []struct { + name string + 
result *Result
+		want   Code
+	}{
+		{
+			name:   "nil result",
+			result: nil,
+			want:   Success,
+		},
+		{
+			name:   "success result",
+			result: NewResult(Success),
+			want:   Success,
+		},
+		{
+			name:   "unschedulable result",
+			result: NewResult(Unschedulable),
+			want:   Unschedulable,
+		},
+		{
+			name:   "error result",
+			result: NewResult(Error),
+			want:   Error,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.result.Code()
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func TestCode_String(t *testing.T) {
+	tests := []struct {
+		name string
+		code Code
+		want string
+	}{
+		{
+			name: "Success code",
+			code: Success,
+			want: "Success",
+		},
+		{
+			name: "Unschedulable code",
+			code: Unschedulable,
+			want: "Unschedulable",
+		},
+		{
+			name: "Error code",
+			code: Error,
+			want: "Error",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.code.String()
+			assert.Equal(t, tt.want, got)
+		})
+	}
+}
diff --git a/pkg/scheduler/framework/plugins/apienablement/api_enablement.go b/pkg/scheduler/framework/plugins/apienablement/api_enablement.go
index 15d32ab35951..791180f639e4 100644
--- a/pkg/scheduler/framework/plugins/apienablement/api_enablement.go
+++ b/pkg/scheduler/framework/plugins/apienablement/api_enablement.go
@@ -19,6 +19,7 @@ package apienablement
 import (
 	"context"
 
+	"k8s.io/apimachinery/pkg/api/meta"
 	"k8s.io/klog/v2"
 
 	clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
@@ -54,10 +55,19 @@ func (p *APIEnablement) Filter(
 	_ *workv1alpha2.ResourceBindingStatus,
 	cluster *clusterv1alpha1.Cluster,
 ) *framework.Result {
-	if !helper.IsAPIEnabled(cluster.Status.APIEnablements, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind) {
-		klog.V(2).Infof("Cluster(%s) not fit as missing API(%s, kind=%s)", cluster.Name, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind)
-		return framework.NewResult(framework.Unschedulable, "cluster(s) did not have the API resource")
+	if helper.IsAPIEnabled(cluster.Status.APIEnablements, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind) {
+		return framework.NewResult(framework.Success)
 	}
-	return framework.NewResult(framework.Success)
+
+	// Let the cluster pass if it is already in the scheduling result and its
+	// API enablements are incomplete, to avoid the cluster being accidentally
+	// removed due to untrusted API enablements.
+	if bindingSpec.TargetContains(cluster.Name) &&
+		!meta.IsStatusConditionTrue(cluster.Status.Conditions, clusterv1alpha1.ClusterConditionCompleteAPIEnablements) {
+		return framework.NewResult(framework.Success)
+	}
+
+	klog.V(2).Infof("Cluster(%s) not fit as missing API(%s, kind=%s)", cluster.Name, bindingSpec.Resource.APIVersion, bindingSpec.Resource.Kind)
+
+	return framework.NewResult(framework.Unschedulable, "cluster(s) did not have the API resource")
 }
diff --git a/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go b/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go
new file mode 100644
index 000000000000..cf68ae0c0bac
--- /dev/null
+++ b/pkg/scheduler/framework/plugins/apienablement/api_enablement_test.go
@@ -0,0 +1,147 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apienablement + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestAPIEnablement_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "API is enabled in cluster", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []clusterv1alpha1.APIResource{ + { + Kind: "Deployment", + }, + }, + }, + }, + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "API is not enabled in cluster", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "custom.io/v1", + Kind: "CustomResource", + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + APIEnablements: []clusterv1alpha1.APIEnablement{ + { + GroupVersion: "apps/v1", + Resources: []clusterv1alpha1.APIResource{ + { + Kind: "Deployment", + }, + }, + }, + }, + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "cluster in target list with incomplete API enablements", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "custom.io/v1", + Kind: "CustomResource", + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "cluster1", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + Status: clusterv1alpha1.ClusterStatus{ + Conditions: []metav1.Condition{ + { + Type: clusterv1alpha1.ClusterConditionCompleteAPIEnablements, + Status: metav1.ConditionFalse, + }, + }, + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &APIEnablement{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*APIEnablement) + assert.True(t, ok) +} + +func TestAPIEnablement_Name(t *testing.T) { + p := &APIEnablement{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go b/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go new file mode 100644 
index 000000000000..f18ff25e6e9d --- /dev/null +++ b/pkg/scheduler/framework/plugins/clusteraffinity/cluster_affinity_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusteraffinity + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterAffinity_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + bindingStatus *workv1alpha2.ResourceBindingStatus + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "matching affinity", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "non-matching affinity", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "matching affinity from ClusterAffinities", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + }, + bindingStatus: &workv1alpha2.ResourceBindingStatus{ + SchedulerObservedAffinityName: "affinity1", + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "no affinity specified", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &ClusterAffinity{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, tt.bindingStatus, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestClusterAffinity_Score(t *testing.T) { + p := &ClusterAffinity{} + spec := 
&workv1alpha2.ResourceBindingSpec{} + cluster := &clusterv1alpha1.Cluster{} + + score, result := p.Score(context.Background(), spec, cluster) + + assert.Equal(t, framework.MinClusterScore, score) + assert.Equal(t, framework.Success, result.Code()) +} + +func TestClusterAffinity_ScoreExtensions(t *testing.T) { + p := &ClusterAffinity{} + assert.Equal(t, p, p.ScoreExtensions()) +} + +func TestClusterAffinity_NormalizeScore(t *testing.T) { + p := &ClusterAffinity{} + result := p.NormalizeScore(context.Background(), nil) + assert.Equal(t, framework.Success, result.Code()) +} + +func TestNew(t *testing.T) { + plugin, err := New() + + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterAffinity) + assert.True(t, ok) +} + +func TestClusterAffinity_Name(t *testing.T) { + p := &ClusterAffinity{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go b/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go new file mode 100644 index 000000000000..18111cb1927b --- /dev/null +++ b/pkg/scheduler/framework/plugins/clustereviction/cluster_eviction_test.go @@ -0,0 +1,107 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustereviction + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterEviction_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectError bool + }{ + { + name: "cluster is in graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster1", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Unschedulable, + expectError: true, + }, + { + name: "cluster is not in graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + GracefulEvictionTasks: []workv1alpha2.GracefulEvictionTask{ + { + FromCluster: "cluster2", + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + { + name: "no graceful eviction tasks", + bindingSpec: &workv1alpha2.ResourceBindingSpec{}, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + expectError: false, + }, + } + + p := &ClusterEviction{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, 
tt.expectedCode, result.Code()) + assert.Equal(t, tt.expectError, result.AsError() != nil) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterEviction) + assert.True(t, ok) +} + +func TestClusterEviction_Name(t *testing.T) { + p := &ClusterEviction{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go b/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go new file mode 100644 index 000000000000..6ed32ec73c9a --- /dev/null +++ b/pkg/scheduler/framework/plugins/clusterlocality/cluster_locality_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterlocality + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestClusterLocality_Score(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedScore int64 + }{ + { + name: "no clusters in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{}, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MinClusterScore, + }, + { + name: "cluster in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + {Name: "cluster2"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MaxClusterScore, + }, + { + name: "cluster not in spec", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster2"}, + {Name: "cluster3"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedScore: framework.MinClusterScore, + }, + } + + p := &ClusterLocality{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + score, result := p.Score(context.Background(), tt.bindingSpec, tt.cluster) + assert.Equal(t, tt.expectedScore, score) + assert.Equal(t, framework.Success, result.Code()) + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*ClusterLocality) + assert.True(t, ok) +} + +func TestClusterLocality_Name(t *testing.T) { + p := &ClusterLocality{} + assert.Equal(t, Name, p.Name()) +} + +func TestClusterLocality_ScoreExtensions(t *testing.T) { + p := &ClusterLocality{} + assert.Nil(t, p.ScoreExtensions()) +} diff --git 
a/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go b/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go new file mode 100644 index 000000000000..771171a17178 --- /dev/null +++ b/pkg/scheduler/framework/plugins/spreadconstraint/spread_constraint_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spreadconstraint + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestSpreadConstraint_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectedReason string + }{ + { + name: "no spread constraints", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Success, + }, + { + name: "spread by provider - provider present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldProvider}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Provider: "aws", + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by provider - provider missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldProvider}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have provider property", + }, + { + name: "spread by region - region present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldRegion}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Region: "us-west-2", + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by region - region missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldRegion}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have region property", + }, + { + name: "spread by zone - zones present", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: 
[]policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldZone}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Zones: []string{"us-west-2a"}, + }, + }, + expectedCode: framework.Success, + }, + { + name: "spread by zone - zones missing", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByField: policyv1alpha1.SpreadByFieldZone}, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) did not have zones property", + }, + } + + p := &SpreadConstraint{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + if tt.expectedReason != "" { + assert.Contains(t, result.AsError().Error(), tt.expectedReason) + } + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*SpreadConstraint) + assert.True(t, ok) +} + +func TestSpreadConstraint_Name(t *testing.T) { + p := &SpreadConstraint{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go new file mode 100644 index 000000000000..05a39c6e2d51 --- /dev/null +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go @@ -0,0 +1,135 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package tainttoleration + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/framework" +) + +func TestTaintToleration_Filter(t *testing.T) { + tests := []struct { + name string + bindingSpec *workv1alpha2.ResourceBindingSpec + cluster *clusterv1alpha1.Cluster + expectedCode framework.Code + expectedReason string + }{ + { + name: "cluster already in target clusters", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1"}, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster1", + }, + }, + expectedCode: framework.Success, + }, + { + name: "no taints", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{}, + expectedCode: framework.Success, + }, + { + name: "tolerated taint", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterTolerations: []corev1.Toleration{ + { + Key: "key1", + Operator: corev1.TolerationOpEqual, + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "key1", + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + expectedCode: framework.Success, + }, + { + name: "untolerated taint", + bindingSpec: &workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{}, + }, + cluster: &clusterv1alpha1.Cluster{ + Spec: clusterv1alpha1.ClusterSpec{ + Taints: []corev1.Taint{ + { + Key: "key1", + Value: "value1", + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + }, + expectedCode: framework.Unschedulable, + expectedReason: "cluster(s) had untolerated taint {key1=value1:NoSchedule}", + }, + } + + p := &TaintToleration{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := p.Filter(context.Background(), tt.bindingSpec, nil, tt.cluster) + assert.Equal(t, tt.expectedCode, result.Code()) + if tt.expectedReason != "" { + assert.Contains(t, result.AsError().Error(), tt.expectedReason) + } + }) + } +} + +func TestNew(t *testing.T) { + plugin, err := New() + assert.NoError(t, err) + assert.NotNil(t, plugin) + _, ok := plugin.(*TaintToleration) + assert.True(t, ok) +} + +func TestTaintToleration_Name(t *testing.T) { + p := &TaintToleration{} + assert.Equal(t, Name, p.Name()) +} diff --git a/pkg/scheduler/framework/types_test.go b/pkg/scheduler/framework/types_test.go new file mode 100644 index 000000000000..10f2ddb4c1b5 --- /dev/null +++ b/pkg/scheduler/framework/types_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" +) + +func TestNewClusterInfo(t *testing.T) { + testCases := []struct { + name string + cluster *clusterv1alpha1.Cluster + want *ClusterInfo + }{ + { + name: "Create ClusterInfo with valid cluster", + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + want: &ClusterInfo{ + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + }, + { + name: "Create ClusterInfo with nil cluster", + cluster: nil, + want: &ClusterInfo{cluster: nil}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := NewClusterInfo(tc.cluster) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestClusterInfo_Cluster(t *testing.T) { + testCases := []struct { + name string + clusterInfo *ClusterInfo + want *clusterv1alpha1.Cluster + }{ + { + name: "Get cluster from valid ClusterInfo", + clusterInfo: &ClusterInfo{ + cluster: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + want: &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + }, + }, + { + name: "Get cluster from nil ClusterInfo", + clusterInfo: nil, + want: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.clusterInfo.Cluster() + assert.Equal(t, tc.want, got) + }) + } +} + +func TestFitError_Error(t *testing.T) { + testCases := []struct { + name string + fitError FitError + expectedOutput string + expectedReasons []string + }{ + { + name: "No clusters available", + fitError: FitError{ + NumAllClusters: 0, + Diagnosis: Diagnosis{ClusterToResultMap: ClusterToResultMap{}}, + }, + expectedOutput: "0/0 clusters are available: no cluster exists.", + expectedReasons: []string{}, + }, + { + name: "Multiple reasons for unavailability", + fitError: FitError{ + NumAllClusters: 3, + Diagnosis: Diagnosis{ + ClusterToResultMap: ClusterToResultMap{ + "cluster1": &Result{reasons: []string{"insufficient CPU", "insufficient memory"}}, + "cluster2": &Result{reasons: []string{"insufficient CPU"}}, + "cluster3": &Result{reasons: []string{"taint mismatch"}}, + }, + }, + }, + expectedOutput: "0/3 clusters are available:", + expectedReasons: []string{ + "2 insufficient CPU", + "1 insufficient memory", + "1 taint mismatch", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := tc.fitError.Error() + + // Check if the error message starts with the expected output + assert.True(t, strings.HasPrefix(got, tc.expectedOutput), "Error message should start with expected output") + + if len(tc.expectedReasons) > 0 { + // Check each reason + for _, reason := range tc.expectedReasons { + assert.Contains(t, got, reason, "Error message should contain the reason: %s", reason) + } + + // Check the total number of reasons + gotReasons := strings.Split(strings.TrimPrefix(got, tc.expectedOutput), ",") + assert.Equal(t, len(tc.expectedReasons), len(gotReasons), "Number of reasons should match") + } else { + // If no reasons are expected, the got message should exactly match the expected output + assert.Equal(t, tc.expectedOutput, got, "Error message should exactly match 
expected output when no reasons are provided") + } + }) + } +} + +func TestUnschedulableError_Error(t *testing.T) { + testCases := []struct { + name string + message string + }{ + { + name: "Unschedulable due to insufficient resources", + message: "Insufficient CPU in all clusters", + }, + { + name: "Unschedulable due to taint mismatch", + message: "No cluster matches required tolerations", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + unschedulableErr := UnschedulableError{Message: tc.message} + assert.Equal(t, tc.message, unschedulableErr.Error()) + }) + } +} diff --git a/pkg/scheduler/metrics/metrics_test.go b/pkg/scheduler/metrics/metrics_test.go new file mode 100644 index 000000000000..2872539b046c --- /dev/null +++ b/pkg/scheduler/metrics/metrics_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + "testing" + "time" +) + +func TestBindingSchedule(t *testing.T) { + tests := []struct { + name string + scheduleType string + duration float64 + err error + }{ + { + name: "Successful schedule", + scheduleType: "test", + duration: 1.5, + err: nil, + }, + { + name: "Failed schedule", + scheduleType: "test", + duration: 0.5, + err: errors.New("schedule failed"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + // We can't easily test the metric values directly, so we'll just ensure the function doesn't panic + BindingSchedule(tt.scheduleType, tt.duration, tt.err) + }) + } +} + +func TestScheduleStep(t *testing.T) { + tests := []struct { + name string + action string + duration time.Duration + }{ + { + name: "Filter step", + action: ScheduleStepFilter, + duration: 100 * time.Millisecond, + }, + { + name: "Score step", + action: ScheduleStepScore, + duration: 200 * time.Millisecond, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + startTime := time.Now().Add(-tt.duration) + // Ensure the function doesn't panic + ScheduleStep(tt.action, startTime) + }) + } +} + +func TestCountSchedulerBindings(t *testing.T) { + tests := []struct { + name string + event string + }{ + { + name: "Binding add event", + event: BindingAdd, + }, + { + name: "Binding update event", + event: BindingUpdate, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(_ *testing.T) { + // Ensure the function doesn't panic + CountSchedulerBindings(tt.event) + }) + } +} diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 864df518042e..f4ac7f23e640 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -106,6 +106,7 @@ type Scheduler struct { enableSchedulerEstimator bool disableSchedulerEstimatorInPullMode bool schedulerEstimatorCache *estimatorclient.SchedulerEstimatorCache + schedulerEstimatorServiceNamespace string schedulerEstimatorServicePrefix string schedulerEstimatorWorker util.AsyncWorker schedulerEstimatorClientConfig *grpcconnection.ClientConfig @@ -121,6 +122,8 @@ type schedulerOptions 
struct { disableSchedulerEstimatorInPullMode bool // schedulerEstimatorTimeout specifies the timeout period of calling the accurate scheduler estimator service. schedulerEstimatorTimeout metav1.Duration + // schedulerEstimatorServiceNamespace specifies the namespace to be used for discovering scheduler estimator services. + schedulerEstimatorServiceNamespace string // SchedulerEstimatorServicePrefix presents the prefix of the accurate scheduler estimator service name. schedulerEstimatorServicePrefix string // schedulerName is the name of the scheduler. Default is "default-scheduler". @@ -174,6 +177,13 @@ func WithSchedulerEstimatorTimeout(schedulerEstimatorTimeout metav1.Duration) Op } } +// WithSchedulerEstimatorServiceNamespace sets the schedulerEstimatorServiceNamespace for the scheduler +func WithSchedulerEstimatorServiceNamespace(schedulerEstimatorServiceNamespace string) Option { + return func(o *schedulerOptions) { + o.schedulerEstimatorServiceNamespace = schedulerEstimatorServiceNamespace + } +} + // WithSchedulerEstimatorServicePrefix sets the schedulerEstimatorServicePrefix for scheduler func WithSchedulerEstimatorServicePrefix(schedulerEstimatorServicePrefix string) Option { return func(o *schedulerOptions) { @@ -262,6 +272,7 @@ func NewScheduler(dynamicClient dynamic.Interface, karmadaClient karmadaclientse sched.enableSchedulerEstimator = options.enableSchedulerEstimator sched.disableSchedulerEstimatorInPullMode = options.disableSchedulerEstimatorInPullMode sched.schedulerEstimatorServicePrefix = options.schedulerEstimatorServicePrefix + sched.schedulerEstimatorServiceNamespace = options.schedulerEstimatorServiceNamespace sched.schedulerEstimatorClientConfig = options.schedulerEstimatorClientConfig sched.schedulerEstimatorCache = estimatorclient.NewSchedulerEstimatorCache() schedulerEstimatorWorkerOptions := util.Options{ @@ -782,7 +793,12 @@ func (s *Scheduler) reconcileEstimatorConnection(key util.QueueKey) error { return nil } - return estimatorclient.EstablishConnection(s.KubeClient, name, s.schedulerEstimatorCache, s.schedulerEstimatorServicePrefix, s.schedulerEstimatorClientConfig) + serviceInfo := estimatorclient.SchedulerEstimatorServiceInfo{ + Name: name, + Namespace: s.schedulerEstimatorServiceNamespace, + NamePrefix: s.schedulerEstimatorServicePrefix, + } + return estimatorclient.EstablishConnection(s.KubeClient, serviceInfo, s.schedulerEstimatorCache, s.schedulerEstimatorClientConfig) } func (s *Scheduler) establishEstimatorConnections() { @@ -795,7 +811,12 @@ func (s *Scheduler) establishEstimatorConnections() { if clusterList.Items[i].Spec.SyncMode == clusterv1alpha1.Pull && s.disableSchedulerEstimatorInPullMode { continue } - if err = estimatorclient.EstablishConnection(s.KubeClient, clusterList.Items[i].Name, s.schedulerEstimatorCache, s.schedulerEstimatorServicePrefix, s.schedulerEstimatorClientConfig); err != nil { + serviceInfo := estimatorclient.SchedulerEstimatorServiceInfo{ + Name: clusterList.Items[i].Name, + Namespace: s.schedulerEstimatorServiceNamespace, + NamePrefix: s.schedulerEstimatorServicePrefix, + } + if err = estimatorclient.EstablishConnection(s.KubeClient, serviceInfo, s.schedulerEstimatorCache, s.schedulerEstimatorClientConfig); err != nil { klog.Error(err) } } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 90c870c0df98..e1145ae3791d 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -18,32 +18,1198 @@ package scheduler import ( "context" + "errors" "fmt" "reflect" + 
"strings" "testing" "time" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" dynamicfake "k8s.io/client-go/dynamic/fake" "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/kubernetes/scheme" + clienttesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" karmadafake "github.com/karmada-io/karmada/pkg/generated/clientset/versioned/fake" + workv1alpha2lister "github.com/karmada-io/karmada/pkg/generated/listers/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/scheduler/core" + schedulercore "github.com/karmada-io/karmada/pkg/scheduler/core" + "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/grpcconnection" ) +func TestDoSchedule(t *testing.T) { + tests := []struct { + name string + key string + binding interface{} + expectError bool + }{ + { + name: "invalid key format", + key: "invalid/key/format", + binding: nil, + expectError: true, + }, + { + name: "ResourceBinding scheduling", + key: "default/test-binding", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + expectError: false, + }, + { + name: "ClusterResourceBinding scheduling", + key: "test-cluster-binding", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset() + fakeRecorder := record.NewFakeRecorder(10) + + var bindingLister *fakeBindingLister + var clusterBindingLister *fakeClusterBindingLister + + if rb, ok := tt.binding.(*workv1alpha2.ResourceBinding); ok { + bindingLister = &fakeBindingLister{binding: rb} + _, err := fakeClient.WorkV1alpha2().ResourceBindings(rb.Namespace).Create(context.TODO(), rb, metav1.CreateOptions{}) + assert.NoError(t, err) + } + if crb, ok := tt.binding.(*workv1alpha2.ClusterResourceBinding); ok { + clusterBindingLister = &fakeClusterBindingLister{binding: crb} + _, err := fakeClient.WorkV1alpha2().ClusterResourceBindings().Create(context.TODO(), crb, metav1.CreateOptions{}) + assert.NoError(t, err) + } + + mockAlgo := &mockAlgorithm{ + scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) { + return schedulercore.ScheduleResult{ + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, nil + }, + } + + s := &Scheduler{ + KarmadaClient: fakeClient, + eventRecorder: fakeRecorder, + bindingLister: bindingLister, + clusterBindingLister: clusterBindingLister, + Algorithm: mockAlgo, + } + + err := s.doSchedule(tt.key) + + if tt.expectError { + 
assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + if !tt.expectError { + if rb, ok := tt.binding.(*workv1alpha2.ResourceBinding); ok { + updated, err := fakeClient.WorkV1alpha2().ResourceBindings(rb.Namespace).Get(context.TODO(), rb.Name, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, updated.Spec.Clusters) + assert.Len(t, updated.Spec.Clusters, 1) + assert.Equal(t, "cluster1", updated.Spec.Clusters[0].Name) + } + if crb, ok := tt.binding.(*workv1alpha2.ClusterResourceBinding); ok { + updated, err := fakeClient.WorkV1alpha2().ClusterResourceBindings().Get(context.TODO(), crb.Name, metav1.GetOptions{}) + assert.NoError(t, err) + assert.NotNil(t, updated.Spec.Clusters) + assert.Len(t, updated.Spec.Clusters, 1) + assert.Equal(t, "cluster1", updated.Spec.Clusters[0].Name) + } + } + }) + } +} + +func TestDoScheduleBinding(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expectSchedule bool + expectError bool + expectedClusters []workv1alpha2.TargetCluster + expectedEvent string + }{ + { + name: "binding with changed placement", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-1", + Namespace: "default", + Annotations: map[string]string{ + util.PolicyPlacementAnnotation: `{"clusterAffinity":{"clusterNames":["cluster1"]}}`, + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + {Name: "cluster2", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + { + name: "binding with replicas changed", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-2", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Replicas: 2, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + SchedulerObservedGeneration: 1, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + {Name: "cluster2", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + { + name: "binding with reschedule triggered", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-3", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + RescheduleTriggeredAt: &metav1.Time{Time: time.Now()}, + Placement: &policyv1alpha1.Placement{}, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(context.Context, *workv1alpha2.ResourceBindingSpec, *workv1alpha2.ResourceBindingStatus, *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + return core.ScheduleResult{SuggestedClusters: tt.expectedClusters}, nil + }, + } + + s := &Scheduler{ + 
KarmadaClient: fakeClient, + bindingLister: &fakeBindingLister{binding: tt.binding}, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.doScheduleBinding(tt.binding.Namespace, tt.binding.Name) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + updatedBinding, err := fakeClient.WorkV1alpha2().ResourceBindings(tt.binding.Namespace).Get(context.TODO(), tt.binding.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + if tt.expectSchedule { + assert.Equal(t, tt.expectedClusters, updatedBinding.Spec.Clusters) + assert.NotEqual(t, tt.binding.Spec.Clusters, updatedBinding.Spec.Clusters) + } else { + assert.Equal(t, tt.binding.Spec.Clusters, updatedBinding.Spec.Clusters) + } + + // Check for expected events + select { + case event := <-fakeRecorder.Events: + assert.Contains(t, event, tt.expectedEvent) + default: + t.Errorf("Expected an event to be recorded") + } + }) + } +} + +func TestDoScheduleClusterBinding(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + expectSchedule bool + expectError bool + expectedClusters []workv1alpha2.TargetCluster + expectedEvent string + }{ + { + name: "cluster binding with changed placement", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-1", + Annotations: map[string]string{ + util.PolicyPlacementAnnotation: `{"clusterAffinity":{"clusterNames":["cluster1"]}}`, + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + {Name: "cluster2", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + { + name: "cluster binding with replicas changed", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-2", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Replicas: 2, + Placement: &policyv1alpha1.Placement{ + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + }, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + SchedulerObservedGeneration: 1, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + {Name: "cluster2", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + { + name: "cluster binding with reschedule triggered", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-3", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + RescheduleTriggeredAt: &metav1.Time{Time: time.Now()}, + Placement: &policyv1alpha1.Placement{}, + }, + }, + expectSchedule: true, + expectError: false, + expectedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + expectedEvent: "Normal ScheduleBindingSucceed", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(context.Context, *workv1alpha2.ResourceBindingSpec, *workv1alpha2.ResourceBindingStatus, *core.ScheduleAlgorithmOption) 
(core.ScheduleResult, error) { + return core.ScheduleResult{SuggestedClusters: tt.expectedClusters}, nil + }, + } + + s := &Scheduler{ + KarmadaClient: fakeClient, + clusterBindingLister: &fakeClusterBindingLister{binding: tt.binding}, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.doScheduleClusterBinding(tt.binding.Name) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + updatedBinding, err := fakeClient.WorkV1alpha2().ClusterResourceBindings().Get(context.TODO(), tt.binding.Name, metav1.GetOptions{}) + assert.NoError(t, err) + + if tt.expectSchedule { + assert.Equal(t, tt.expectedClusters, updatedBinding.Spec.Clusters) + assert.NotEqual(t, tt.binding.Spec.Clusters, updatedBinding.Spec.Clusters) + } else { + assert.Equal(t, tt.binding.Spec.Clusters, updatedBinding.Spec.Clusters) + } + + // Check for expected events + select { + case event := <-fakeRecorder.Events: + assert.Contains(t, event, tt.expectedEvent) + default: + t.Errorf("Expected an event to be recorded") + } + }) + } +} + +func TestScheduleResourceBindingWithClusterAffinity(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + scheduleResult core.ScheduleResult + scheduleError error + expectError bool + expectedPatch string + expectedEvent string + }{ + { + name: "successful scheduling", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + scheduleResult: core.ScheduleResult{ + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + expectError: false, + expectedPatch: `{"metadata":{"annotations":{"policy.karmada.io/applied-placement":"{\"clusterAffinity\":{\"clusterNames\":[\"cluster1\"]}}"}},"spec":{"clusters":[{"name":"cluster1","replicas":1}]}}`, + expectedEvent: "Normal ScheduleBindingSucceed Binding has been scheduled successfully.", + }, + { + name: "scheduling error", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-error", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + scheduleResult: core.ScheduleResult{}, + scheduleError: errors.New("scheduling error"), + expectError: true, + expectedEvent: "Warning ScheduleBindingFailed scheduling error", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(context.Context, *workv1alpha2.ResourceBindingSpec, *workv1alpha2.ResourceBindingStatus, *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + return tt.scheduleResult, tt.scheduleError + }, + } + s := &Scheduler{ + KarmadaClient: fakeClient, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.scheduleResourceBindingWithClusterAffinity(tt.binding) + + if (err != nil) != tt.expectError { + t.Errorf("scheduleResourceBindingWithClusterAffinity() error = %v, expectError %v", err, tt.expectError) + } + + actions := fakeClient.Actions() + patchActions := filterPatchActions(actions) + + if 
tt.expectError { + assert.Empty(t, patchActions, "Expected no patch actions for error case") + } else { + assert.Len(t, patchActions, 1, "Expected one patch action") + if len(patchActions) > 0 { + actualPatch := string(patchActions[0].GetPatch()) + assert.JSONEq(t, tt.expectedPatch, actualPatch, "Patch does not match expected") + } + } + + // Check if an event was recorded + select { + case event := <-fakeRecorder.Events: + assert.Contains(t, event, tt.expectedEvent, "Event does not match expected") + default: + t.Errorf("Expected an event to be recorded") + } + }) + } +} + +func TestScheduleResourceBindingWithClusterAffinities(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + scheduleResults []core.ScheduleResult + scheduleErrors []error + expectError bool + expectedPatches []string + expectedEvent string + }{ + { + name: "successful scheduling with first affinity", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + scheduleResults: []core.ScheduleResult{ + { + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + }, + scheduleErrors: []error{nil}, + expectError: false, + expectedPatches: []string{ + `{"metadata":{"annotations":{"policy.karmada.io/applied-placement":"{\"clusterAffinities\":[{\"affinityName\":\"affinity1\",\"clusterNames\":[\"cluster1\"]},{\"affinityName\":\"affinity2\",\"clusterNames\":[\"cluster2\"]}]}"}},"spec":{"clusters":[{"name":"cluster1","replicas":1}]}}`, + `{"status":{"schedulerObservingAffinityName":"affinity1"}}`, + }, + expectedEvent: "Normal ScheduleBindingSucceed Binding has been scheduled successfully.", + }, + { + name: "successful scheduling with second affinity", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-2", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + scheduleResults: []core.ScheduleResult{ + {}, + { + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster2", Replicas: 1}, + }, + }, + }, + scheduleErrors: []error{errors.New("first affinity failed"), nil}, + expectError: false, + expectedPatches: []string{ + `{"metadata":{"annotations":{"policy.karmada.io/applied-placement":"{\"clusterAffinities\":[{\"affinityName\":\"affinity1\",\"clusterNames\":[\"cluster1\"]},{\"affinityName\":\"affinity2\",\"clusterNames\":[\"cluster2\"]}]}"}},"spec":{"clusters":[{"name":"cluster2","replicas":1}]}}`, + `{"status":{"schedulerObservingAffinityName":"affinity2"}}`, + }, + expectedEvent: "Warning ScheduleBindingFailed failed to schedule ResourceBinding(default/test-binding-2) with clusterAffiliates index(0): first affinity 
failed", + }, + { + name: "all affinities fail", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding-fail", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + scheduleResults: []core.ScheduleResult{{}, {}}, + scheduleErrors: []error{errors.New("first affinity failed"), errors.New("second affinity failed")}, + expectError: true, + expectedPatches: []string{}, + expectedEvent: "Warning ScheduleBindingFailed failed to schedule ResourceBinding", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(_ context.Context, spec *workv1alpha2.ResourceBindingSpec, status *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + index := getAffinityIndex(spec.Placement.ClusterAffinities, status.SchedulerObservedAffinityName) + if index < len(tt.scheduleResults) { + return tt.scheduleResults[index], tt.scheduleErrors[index] + } + return core.ScheduleResult{}, errors.New("unexpected call to Schedule") + }, + } + s := &Scheduler{ + KarmadaClient: fakeClient, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.scheduleResourceBindingWithClusterAffinities(tt.binding) + + if (err != nil) != tt.expectError { + t.Errorf("scheduleResourceBindingWithClusterAffinities() error = %v, expectError %v", err, tt.expectError) + } + + actions := fakeClient.Actions() + patchActions := filterPatchActions(actions) + + if tt.expectError { + assert.Empty(t, patchActions, "Expected no patch actions for error case") + } else { + assert.Len(t, patchActions, len(tt.expectedPatches), "Expected %d patch actions", len(tt.expectedPatches)) + for i, expectedPatch := range tt.expectedPatches { + actualPatch := string(patchActions[i].GetPatch()) + assert.JSONEq(t, expectedPatch, actualPatch, "Patch %d does not match expected", i+1) + } + } + + // Check if an event was recorded + select { + case event := <-fakeRecorder.Events: + if strings.Contains(event, "ScheduleBindingFailed") { + assert.Contains(t, event, tt.expectedEvent, "Event does not match expected") + } else { + assert.Contains(t, event, "ScheduleBindingSucceed", "Expected ScheduleBindingSucceed event") + } + default: + t.Errorf("Expected an event to be recorded") + } + }) + } +} + +func TestPatchScheduleResultForResourceBinding(t *testing.T) { + tests := []struct { + name string + oldBinding *workv1alpha2.ResourceBinding + placement string + scheduleResult []workv1alpha2.TargetCluster + expectError bool + expectedBinding *workv1alpha2.ResourceBinding + }{ + { + name: "successful patch", + oldBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + }, + placement: "test-placement", + scheduleResult: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + expectError: false, + expectedBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", 
+ Annotations: map[string]string{ + util.PolicyPlacementAnnotation: "test-placement", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + }, + }, + { + name: "no changes", + oldBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + Annotations: map[string]string{ + util.PolicyPlacementAnnotation: "test-placement", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + }, + placement: "test-placement", + scheduleResult: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + expectError: false, + expectedBinding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + Annotations: map[string]string{ + util.PolicyPlacementAnnotation: "test-placement", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Clusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &Scheduler{ + KarmadaClient: karmadafake.NewSimpleClientset(tt.oldBinding), + } + + err := s.patchScheduleResultForResourceBinding(tt.oldBinding, tt.placement, tt.scheduleResult) + + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + + updatedBinding, err := s.KarmadaClient.WorkV1alpha2().ResourceBindings(tt.oldBinding.Namespace).Get(context.TODO(), tt.oldBinding.Name, metav1.GetOptions{}) + assert.NoError(t, err) + assert.Equal(t, tt.expectedBinding.Annotations, updatedBinding.Annotations) + assert.Equal(t, tt.expectedBinding.Spec.Clusters, updatedBinding.Spec.Clusters) + } + }) + } +} + +func TestScheduleClusterResourceBindingWithClusterAffinity(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + scheduleResult core.ScheduleResult + scheduleError error + expectError bool + }{ + { + name: "successful scheduling", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + }, + }, + scheduleResult: core.ScheduleResult{ + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + {Name: "cluster2", Replicas: 1}, + }, + }, + scheduleError: nil, + expectError: false, + }, + { + name: "scheduling error", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-error", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + }, + scheduleResult: core.ScheduleResult{}, + scheduleError: errors.New("scheduling error"), + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + return tt.scheduleResult, tt.scheduleError + 
}, + } + s := &Scheduler{ + KarmadaClient: fakeClient, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.scheduleClusterResourceBindingWithClusterAffinity(tt.binding) + + if (err != nil) != tt.expectError { + t.Errorf("scheduleClusterResourceBindingWithClusterAffinity() error = %v, expectError %v", err, tt.expectError) + } + + // Check if a patch was applied + actions := fakeClient.Actions() + patchActions := filterPatchActions(actions) + if tt.expectError { + assert.Empty(t, patchActions, "Expected no patch actions for error case") + } else { + assert.NotEmpty(t, patchActions, "Expected patch actions for success case") + } + + // Check if an event was recorded + select { + case event := <-fakeRecorder.Events: + if tt.expectError { + assert.Contains(t, event, "ScheduleBindingFailed", "Expected ScheduleBindingFailed event") + } else { + assert.Contains(t, event, "ScheduleBindingSucceed", "Expected ScheduleBindingSucceed event") + } + default: + t.Errorf("Expected an event to be recorded") + } + }) + } +} + +func TestScheduleClusterResourceBindingWithClusterAffinities(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + scheduleResults []core.ScheduleResult + scheduleErrors []error + expectError bool + }{ + { + name: "successful scheduling with first affinity", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + scheduleResults: []core.ScheduleResult{ + { + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, + }, + scheduleErrors: []error{nil}, + expectError: false, + }, + { + name: "successful scheduling with second affinity", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-2", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + scheduleResults: []core.ScheduleResult{ + {}, + { + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster2", Replicas: 1}, + }, + }, + }, + scheduleErrors: []error{errors.New("first affinity failed"), nil}, + expectError: false, + }, + { + name: "all affinities fail", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding-fail", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinities: []policyv1alpha1.ClusterAffinityTerm{ + { + AffinityName: "affinity1", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + { + AffinityName: "affinity2", + ClusterAffinity: policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster2"}, + }, + }, + }, + }, + }, + }, + 
scheduleResults: []core.ScheduleResult{{}, {}}, + scheduleErrors: []error{errors.New("first affinity failed"), errors.New("second affinity failed")}, + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fakeClient := karmadafake.NewSimpleClientset(tt.binding) + fakeRecorder := record.NewFakeRecorder(10) + mockAlgorithm := &mockAlgorithm{ + scheduleFunc: func(_ context.Context, spec *workv1alpha2.ResourceBindingSpec, status *workv1alpha2.ResourceBindingStatus, _ *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + index := getAffinityIndex(spec.Placement.ClusterAffinities, status.SchedulerObservedAffinityName) + if index < len(tt.scheduleResults) { + return tt.scheduleResults[index], tt.scheduleErrors[index] + } + return core.ScheduleResult{}, errors.New("unexpected call to Schedule") + }, + } + s := &Scheduler{ + KarmadaClient: fakeClient, + eventRecorder: fakeRecorder, + Algorithm: mockAlgorithm, + } + + err := s.scheduleClusterResourceBindingWithClusterAffinities(tt.binding) + + if (err != nil) != tt.expectError { + t.Errorf("scheduleClusterResourceBindingWithClusterAffinities() error = %v, expectError %v", err, tt.expectError) + } + }) + } +} + +func TestWorkerAndScheduleNext(t *testing.T) { + testScheme := setupScheme() + + resourceBinding := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + } + + clusterResourceBinding := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-binding", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1"}, + }, + }, + }, + } + + fakeClient := karmadafake.NewSimpleClientset(resourceBinding, clusterResourceBinding) + + testCases := []struct { + name string + key string + shutdown bool + expectResult bool + }{ + { + name: "Schedule ResourceBinding", + key: "default/test-binding", + shutdown: false, + expectResult: true, + }, + { + name: "Schedule ClusterResourceBinding", + key: "test-cluster-binding", + shutdown: false, + expectResult: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()) + bindingLister := &fakeBindingLister{binding: resourceBinding} + clusterBindingLister := &fakeClusterBindingLister{binding: clusterResourceBinding} + + mockAlgo := &mockAlgorithm{ + scheduleFunc: func(_ context.Context, _ *workv1alpha2.ResourceBindingSpec, _ *workv1alpha2.ResourceBindingStatus, _ *schedulercore.ScheduleAlgorithmOption) (schedulercore.ScheduleResult, error) { + return schedulercore.ScheduleResult{ + SuggestedClusters: []workv1alpha2.TargetCluster{ + {Name: "cluster1", Replicas: 1}, + }, + }, nil + }, + } + + eventBroadcaster := record.NewBroadcaster() + eventRecorder := eventBroadcaster.NewRecorder(testScheme, corev1.EventSource{Component: "test-scheduler"}) + + s := &Scheduler{ + KarmadaClient: fakeClient, + queue: queue, + bindingLister: bindingLister, + clusterBindingLister: clusterBindingLister, + Algorithm: mockAlgo, + eventRecorder: eventRecorder, + } + + s.queue.Add(tc.key) + + if tc.shutdown { + s.queue.ShutDown() + } + + result := s.scheduleNext() + + 
assert.Equal(t, tc.expectResult, result, "scheduleNext return value mismatch") + + if !tc.shutdown { + assert.Equal(t, 0, s.queue.Len(), "Queue should be empty after processing") + } + }) + } +} + +func TestPlacementChanged(t *testing.T) { + tests := []struct { + name string + placement *policyv1alpha1.Placement + appliedPlacementStr string + observedAffinityName string + want bool + }{ + { + name: "placement changed", + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + appliedPlacementStr: `{"clusterAffinity":{"clusterNames":["cluster1"]}}`, + observedAffinityName: "", + want: true, + }, + { + name: "placement not changed", + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + appliedPlacementStr: `{"clusterAffinity":{"clusterNames":["cluster1","cluster2"]}}`, + observedAffinityName: "", + want: false, + }, + { + name: "invalid applied placement string", + placement: &policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"cluster1", "cluster2"}, + }, + }, + appliedPlacementStr: `invalid json`, + observedAffinityName: "", + want: false, + }, + { + name: "empty placement", + placement: &policyv1alpha1.Placement{}, + appliedPlacementStr: `{}`, + observedAffinityName: "", + want: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + Name: "test-name", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Placement: tt.placement, + }, + Status: workv1alpha2.ResourceBindingStatus{ + SchedulerObservedAffinityName: tt.observedAffinityName, + }, + } + got := placementChanged(*rb.Spec.Placement, tt.appliedPlacementStr, rb.Status.SchedulerObservedAffinityName) + assert.Equal(t, tt.want, got, "placementChanged() result mismatch") + }) + } +} + func TestCreateScheduler(t *testing.T) { dynamicClient := dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()) karmadaClient := karmadafake.NewSimpleClientset() kubeClient := fake.NewSimpleClientset() port := 10025 + serviceNamespace := "tenant1" servicePrefix := "test-service-prefix" schedulerName := "test-scheduler" timeout := metav1.Duration{Duration: 5 * time.Second} + mockPlugins := []string{"plugin1", "plugin2"} + mockRateLimiterOptions := ratelimiterflag.Options{} + testcases := []struct { name string opts []Option @@ -51,11 +1217,13 @@ func TestCreateScheduler(t *testing.T) { schedulerEstimatorPort int disableSchedulerEstimatorInPullMode bool schedulerEstimatorTimeout metav1.Duration + schedulerEstimatorServiceNamespace string schedulerEstimatorServicePrefix string schedulerName string schedulerEstimatorClientConfig *grpcconnection.ClientConfig enableEmptyWorkloadPropagation bool plugins []string + rateLimiterOptions ratelimiterflag.Options }{ { name: "scheduler with default configuration", @@ -101,6 +1269,17 @@ func TestCreateScheduler(t *testing.T) { schedulerEstimatorPort: port, schedulerEstimatorServicePrefix: servicePrefix, }, + { + name: "scheduler with custom SchedulerEstimatorServiceNamespace set", + opts: []Option{ + WithEnableSchedulerEstimator(true), + WithSchedulerEstimatorConnection(port, "", "", "", false), + WithSchedulerEstimatorServiceNamespace(serviceNamespace), + }, + enableSchedulerEstimator: true, + schedulerEstimatorPort: port, + 
schedulerEstimatorServiceNamespace: serviceNamespace, + }, { name: "scheduler with SchedulerName enabled", opts: []Option{ @@ -126,6 +1305,20 @@ func TestCreateScheduler(t *testing.T) { schedulerEstimatorPort: port, schedulerEstimatorTimeout: timeout, }, + { + name: "scheduler with EnableSchedulerPlugin", + opts: []Option{ + WithEnableSchedulerPlugin(mockPlugins), + }, + plugins: mockPlugins, + }, + { + name: "scheduler with RateLimiterOptions", + opts: []Option{ + WithRateLimiterOptions(mockRateLimiterOptions), + }, + rateLimiterOptions: mockRateLimiterOptions, + }, } for _, tc := range testcases { @@ -147,6 +1340,10 @@ func TestCreateScheduler(t *testing.T) { t.Errorf("unexpected disableSchedulerEstimatorInPullMode want %v, got %v", tc.disableSchedulerEstimatorInPullMode, sche.disableSchedulerEstimatorInPullMode) } + if tc.schedulerEstimatorServiceNamespace != sche.schedulerEstimatorServiceNamespace { + t.Errorf("unexpected schedulerEstimatorServiceNamespace want %v, got %v", tc.schedulerEstimatorServiceNamespace, sche.schedulerEstimatorServiceNamespace) + } + if tc.schedulerEstimatorServicePrefix != sche.schedulerEstimatorServicePrefix { t.Errorf("unexpected schedulerEstimatorServicePrefix want %v, got %v", tc.schedulerEstimatorServicePrefix, sche.schedulerEstimatorServicePrefix) } @@ -158,10 +1355,17 @@ func TestCreateScheduler(t *testing.T) { if tc.enableEmptyWorkloadPropagation != sche.enableEmptyWorkloadPropagation { t.Errorf("unexpected enableEmptyWorkloadPropagation want %v, got %v", tc.enableEmptyWorkloadPropagation, sche.enableEmptyWorkloadPropagation) } + if len(tc.plugins) > 0 && sche.Algorithm == nil { + t.Errorf("expected Algorithm to be set when plugins are provided") + } + if tc.rateLimiterOptions != (ratelimiterflag.Options{}) && sche.queue == nil { + t.Errorf("expected queue to be set when rate limiter options are provided") + } }) } } -func Test_patchBindingStatusCondition(t *testing.T) { + +func TestPatchBindingStatusCondition(t *testing.T) { oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second) oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second) @@ -319,7 +1523,7 @@ func Test_patchBindingStatusCondition(t *testing.T) { } } -func Test_patchBindingStatusWithAffinityName(t *testing.T) { +func TestPatchBindingStatusWithAffinityName(t *testing.T) { karmadaClient := karmadafake.NewSimpleClientset() tests := []struct { @@ -365,7 +1569,7 @@ func Test_patchBindingStatusWithAffinityName(t *testing.T) { } } -func Test_patchClusterBindingStatusCondition(t *testing.T) { +func TestPatchClusterBindingStatusCondition(t *testing.T) { oneHourBefore := time.Now().Add(-1 * time.Hour).Round(time.Second) oneHourAfter := time.Now().Add(1 * time.Hour).Round(time.Second) @@ -523,7 +1727,7 @@ func Test_patchClusterBindingStatusCondition(t *testing.T) { } } -func Test_patchClusterBindingStatusWithAffinityName(t *testing.T) { +func TestPatchClusterBindingStatusWithAffinityName(t *testing.T) { karmadaClient := karmadafake.NewSimpleClientset() tests := []struct { @@ -576,7 +1780,7 @@ func Test_patchClusterBindingStatusWithAffinityName(t *testing.T) { } } -func Test_recordScheduleResultEventForResourceBinding(t *testing.T) { +func TestRecordScheduleResultEventForResourceBinding(t *testing.T) { fakeRecorder := record.NewFakeRecorder(10) scheduler := &Scheduler{eventRecorder: fakeRecorder} @@ -667,7 +1871,7 @@ func contains(event, msg string) bool { return len(event) >= len(msg) && event[len(event)-len(msg):] == msg } -func 
Test_recordScheduleResultEventForClusterResourceBinding(t *testing.T) { +func TestRecordScheduleResultEventForClusterResourceBinding(t *testing.T) { fakeRecorder := record.NewFakeRecorder(10) scheduler := &Scheduler{eventRecorder: fakeRecorder} @@ -755,7 +1959,7 @@ func Test_recordScheduleResultEventForClusterResourceBinding(t *testing.T) { } } -func Test_targetClustersToString(t *testing.T) { +func TestTargetClustersToString(t *testing.T) { tests := []struct { name string tcs []workv1alpha2.TargetCluster @@ -800,3 +2004,73 @@ func Test_targetClustersToString(t *testing.T) { }) } } + +// Helper Functions + +// Helper function to setup scheme for testing +func setupScheme() *runtime.Scheme { + s := runtime.NewScheme() + + _ = scheme.AddToScheme(s) + _ = workv1alpha2.Install(s) + _ = policyv1alpha1.Install(s) + + return s +} + +// Helper function to filter patch actions +func filterPatchActions(actions []clienttesting.Action) []clienttesting.PatchAction { + var patchActions []clienttesting.PatchAction + for _, action := range actions { + if patch, ok := action.(clienttesting.PatchAction); ok { + patchActions = append(patchActions, patch) + } + } + return patchActions +} + +// Mock Implementations + +type mockAlgorithm struct { + scheduleFunc func(context.Context, *workv1alpha2.ResourceBindingSpec, *workv1alpha2.ResourceBindingStatus, *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) +} + +func (m *mockAlgorithm) Schedule(ctx context.Context, spec *workv1alpha2.ResourceBindingSpec, status *workv1alpha2.ResourceBindingStatus, option *core.ScheduleAlgorithmOption) (core.ScheduleResult, error) { + return m.scheduleFunc(ctx, spec, status, option) +} + +type fakeBindingLister struct { + binding *workv1alpha2.ResourceBinding +} + +func (f *fakeBindingLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return []*workv1alpha2.ResourceBinding{f.binding}, nil +} + +func (f *fakeBindingLister) ResourceBindings(_ string) workv1alpha2lister.ResourceBindingNamespaceLister { + return &fakeBindingNamespaceLister{binding: f.binding} +} + +type fakeBindingNamespaceLister struct { + binding *workv1alpha2.ResourceBinding +} + +func (f *fakeBindingNamespaceLister) List(_ labels.Selector) (ret []*workv1alpha2.ResourceBinding, err error) { + return []*workv1alpha2.ResourceBinding{f.binding}, nil +} + +func (f *fakeBindingNamespaceLister) Get(_ string) (*workv1alpha2.ResourceBinding, error) { + return f.binding, nil +} + +type fakeClusterBindingLister struct { + binding *workv1alpha2.ClusterResourceBinding +} + +func (f *fakeClusterBindingLister) List(_ labels.Selector) (ret []*workv1alpha2.ClusterResourceBinding, err error) { + return []*workv1alpha2.ClusterResourceBinding{f.binding}, nil +} + +func (f *fakeClusterBindingLister) Get(_ string) (*workv1alpha2.ClusterResourceBinding, error) { + return f.binding, nil +} diff --git a/pkg/search/apiserver.go b/pkg/search/apiserver.go index 39764a68e7c3..aa96f5f77c51 100644 --- a/pkg/search/apiserver.go +++ b/pkg/search/apiserver.go @@ -17,7 +17,9 @@ limitations under the License. 
 package search
 
 import (
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/version"
+	"k8s.io/apiserver/pkg/registry/generic"
 	"k8s.io/apiserver/pkg/registry/rest"
 	genericapiserver "k8s.io/apiserver/pkg/server"
 	"k8s.io/klog/v2"
@@ -73,6 +75,13 @@
 	return CompletedConfig{&c}
 }
 
+var resourceRegistryStorageBuilder = func(scheme *runtime.Scheme, optsGetter generic.RESTOptionsGetter) (*searchstorage.ResourceRegistryStorage, error) {
+	return searchstorage.NewResourceRegistryStorage(scheme, optsGetter)
+}
+var apiGroupInstaller = func(server *APIServer, apiGroupInfo *genericapiserver.APIGroupInfo) error {
+	return server.GenericAPIServer.InstallAPIGroup(apiGroupInfo)
+}
+
 func (c completedConfig) New() (*APIServer, error) {
 	genericServer, err := c.GenericConfig.New("karmada-search", genericapiserver.NewEmptyDelegate())
 	if err != nil {
@@ -85,7 +94,7 @@
 
 	apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(searchapis.GroupName, searchscheme.Scheme, searchscheme.ParameterCodec, searchscheme.Codecs)
 
-	resourceRegistryStorage, err := searchstorage.NewResourceRegistryStorage(searchscheme.Scheme, c.GenericConfig.RESTOptionsGetter)
+	resourceRegistryStorage, err := resourceRegistryStorageBuilder(searchscheme.Scheme, c.GenericConfig.RESTOptionsGetter)
 	if err != nil {
 		klog.Errorf("unable to create REST storage for a resource due to %v, will die", err)
 		return nil, err
@@ -110,7 +119,7 @@
 
 	apiGroupInfo.VersionedResourcesStorageMap["v1alpha1"] = v1alpha1search
 
-	if err = server.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
+	if err = apiGroupInstaller(server, &apiGroupInfo); err != nil {
 		return nil, err
 	}
 
diff --git a/pkg/search/apiserver_test.go b/pkg/search/apiserver_test.go
new file mode 100644
index 000000000000..61ed3bf9de76
--- /dev/null
+++ b/pkg/search/apiserver_test.go
@@ -0,0 +1,157 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package search + +import ( + "errors" + "net/http" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericapiserver "k8s.io/apiserver/pkg/server" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + fakeclientset "k8s.io/client-go/kubernetes/fake" + restclient "k8s.io/client-go/rest" + + searchstorage "github.com/karmada-io/karmada/pkg/registry/search/storage" +) + +func TestNewKarmadaSearchAPIServer(t *testing.T) { + tests := []struct { + name string + cfg *completedConfig + genericAPIServerConfig *genericapiserver.Config + client clientset.Interface + restConfig *restclient.Config + prep func(*completedConfig, *genericapiserver.Config, clientset.Interface) error + wantErr bool + errMsg string + }{ + { + name: "NewKarmadaSearchAPIServer_NetworkIssue_FailedToCreateRESTStorage", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + client: fakeclientset.NewSimpleClientset(), + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + ExternalAddress: "10.0.0.0:10000", + }, + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + resourceRegistryStorageBuilder = func(*runtime.Scheme, generic.RESTOptionsGetter) (*searchstorage.ResourceRegistryStorage, error) { + return nil, errors.New("unexpected network issue while creating the resource registry storage") + } + return nil + }, + wantErr: true, + errMsg: "unexpected network issue while creating the resource registry storage", + }, + { + name: "NewKarmadaSearchAPIServer_InstalledAPIGroup_FailedToInstallAPIGroup", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + client: fakeclientset.NewSimpleClientset(), + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + ExternalAddress: "10.0.0.0:10000", + }, + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + resourceRegistryStorageBuilder = func(*runtime.Scheme, generic.RESTOptionsGetter) (*searchstorage.ResourceRegistryStorage, error) { + return &searchstorage.ResourceRegistryStorage{}, nil + } + apiGroupInstaller = func(*APIServer, *genericapiserver.APIGroupInfo) error { + return errors.New("failed to install api group") + } + return nil + }, + wantErr: true, + errMsg: "failed to install api group", + }, + { + name: "NewKarmadaSearchAPIServer_InstalledAPIGroup_APIGroupInstalled", + cfg: &completedConfig{ + ExtraConfig: &ExtraConfig{}, + }, + 
client: fakeclientset.NewSimpleClientset(), + genericAPIServerConfig: &genericapiserver.Config{ + RESTOptionsGetter: generic.RESTOptions{}, + Serializer: runtime.NewSimpleNegotiatedSerializer(runtime.SerializerInfo{ + MediaType: runtime.ContentTypeJSON, + }), + LoopbackClientConfig: &restclient.Config{}, + EquivalentResourceRegistry: runtime.NewEquivalentResourceRegistry(), + BuildHandlerChainFunc: func(http.Handler, *genericapiserver.Config) (secure http.Handler) { + return nil + }, + ExternalAddress: "10.0.0.0:10000", + }, + prep: func(cfg *completedConfig, genericAPIServerCfg *genericapiserver.Config, client clientset.Interface) error { + sharedInformer := informers.NewSharedInformerFactory(client, 0) + cfg.GenericConfig = genericAPIServerCfg.Complete(sharedInformer) + resourceRegistryStorageBuilder = func(*runtime.Scheme, generic.RESTOptionsGetter) (*searchstorage.ResourceRegistryStorage, error) { + return &searchstorage.ResourceRegistryStorage{}, nil + } + apiGroupInstaller = func(*APIServer, *genericapiserver.APIGroupInfo) error { + return nil + } + return nil + }, + wantErr: false, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.cfg, test.genericAPIServerConfig, test.client); err != nil { + t.Fatalf("failed to prep test environment before creating new karmada search apiserver, got: %v", err) + } + _, err := test.cfg.New() + if err == nil && test.wantErr { + t.Fatal("expected an error, but got none") + } + if err != nil && !test.wantErr { + t.Errorf("unexpected error, got: %v", err) + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected error message %s to be in %s", test.errMsg, err.Error()) + } + }) + } +} diff --git a/pkg/search/proxy/controller.go b/pkg/search/proxy/controller.go index 938a9a2447c2..460a43e08f8c 100644 --- a/pkg/search/proxy/controller.go +++ b/pkg/search/proxy/controller.go @@ -186,7 +186,7 @@ func (ctl *Controller) reconcile(util.QueueKey) error { if err != nil { return err } - + registeredResources := make(map[schema.GroupVersionResource]struct{}) resourcesByClusters := make(map[string]map[schema.GroupVersionResource]*store.MultiNamespace) for _, registry := range registries { matchedResources := make(map[schema.GroupVersionResource]*store.MultiNamespace, len(registry.Spec.ResourceSelectors)) @@ -203,8 +203,8 @@ func (ctl *Controller) reconcile(util.QueueKey) error { matchedResources[gvr] = nsSelector } nsSelector.Add(selector.Namespace) + registeredResources[gvr] = struct{}{} } - if len(matchedResources) == 0 { continue } @@ -238,7 +238,7 @@ func (ctl *Controller) reconcile(util.QueueKey) error { } } - return ctl.store.UpdateCache(resourcesByClusters) + return ctl.store.UpdateCache(resourcesByClusters, registeredResources) } type errorHTTPHandler struct { diff --git a/pkg/search/proxy/controller_test.go b/pkg/search/proxy/controller_test.go index 41eedbde96fd..26cd0dc9b257 100644 --- a/pkg/search/proxy/controller_test.go +++ b/pkg/search/proxy/controller_test.go @@ -293,7 +293,7 @@ func TestController_reconcile(t *testing.T) { clusterLister: karmadaFactory.Cluster().V1alpha1().Clusters().Lister(), registryLister: karmadaFactory.Search().V1alpha1().ResourceRegistries().Lister(), store: &proxytest.MockStore{ - UpdateCacheFunc: func(m map[string]map[schema.GroupVersionResource]*store.MultiNamespace) error { + UpdateCacheFunc: func(m map[string]map[schema.GroupVersionResource]*store.MultiNamespace, _ map[schema.GroupVersionResource]struct{}) error { for 
clusterName, resources := range m { resourceNames := make([]string, 0, len(resources)) for resource := range resources { diff --git a/pkg/search/proxy/framework/plugins/cache/apis/core/v1/conversion.go b/pkg/search/proxy/framework/plugins/cache/apis/core/v1/conversion.go new file mode 100644 index 000000000000..efdb80c011b6 --- /dev/null +++ b/pkg/search/proxy/framework/plugins/cache/apis/core/v1/conversion.go @@ -0,0 +1,167 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// addConversionFuncs ensures that the cache plugin can handle the field selectors for corev1 resources. +// It is copied from "k8s.io/kubernetes/pkg/apis/core/v1/conversion.go". +func addConversionFuncs(scheme *runtime.Scheme) error { + // Add field conversion funcs. + err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Pod"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "spec.nodeName", + "spec.restartPolicy", + "spec.schedulerName", + "spec.serviceAccountName", + "spec.hostNetwork", + "status.phase", + "status.podIP", + "status.podIPs", + "status.nominatedNodeName": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Node"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "spec.unschedulable": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }, + ) + if err != nil { + return err + } + err = scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ReplicationController"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.name", + "metadata.namespace", + "status.replicas": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) + if err != nil { + return err + } + if err := AddFieldLabelConversionsForEvent(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForNamespace(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForSecret(scheme); err != nil { + return err + } + if err := AddFieldLabelConversionsForService(scheme); err != nil { + return err + } + return nil +} + +// AddFieldLabelConversionsForEvent adds field label conversions for Event. 
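+// With this conversion registered, field selectors on cached Events, for
+// example "involvedObject.kind=Pod" or "reason=FailedScheduling", can be
+// converted for cached reads instead of being rejected as unsupported labels.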
+func AddFieldLabelConversionsForEvent(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Event"), + func(label, value string) (string, string, error) { + switch label { + case "involvedObject.kind", + "involvedObject.namespace", + "involvedObject.name", + "involvedObject.uid", + "involvedObject.apiVersion", + "involvedObject.resourceVersion", + "involvedObject.fieldPath", + "reason", + "reportingComponent", + "source", + "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +// AddFieldLabelConversionsForNamespace adds field label conversions for Namespace. +func AddFieldLabelConversionsForNamespace(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Namespace"), + func(label, value string) (string, string, error) { + switch label { + case "status.phase", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +// AddFieldLabelConversionsForSecret adds field label conversions for Secret. +func AddFieldLabelConversionsForSecret(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Secret"), + func(label, value string) (string, string, error) { + switch label { + case "type", + "metadata.namespace", + "metadata.name": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} + +// AddFieldLabelConversionsForService adds field label conversions for Service. +func AddFieldLabelConversionsForService(scheme *runtime.Scheme) error { + return scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("Service"), + func(label, value string) (string, string, error) { + switch label { + case "metadata.namespace", + "metadata.name", + "spec.clusterIP", + "spec.type": + return label, value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } + }) +} diff --git a/pkg/search/proxy/framework/plugins/cache/apis/core/v1/register.go b/pkg/search/proxy/framework/plugins/cache/apis/core/v1/register.go new file mode 100644 index 000000000000..bf4fdd70cdac --- /dev/null +++ b/pkg/search/proxy/framework/plugins/cache/apis/core/v1/register.go @@ -0,0 +1,32 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = corev1.SchemeGroupVersion + +var ( + // SchemeBuilder points to a list of functions added to Scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addConversionFuncs) + // AddToScheme applies all the stored functions to the scheme. 
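+	// The cache plugin calls this once at init time on its package-level
+	// scheme (see scheme.go), so proxied reads served from the cache can
+	// convert field selectors for the corev1 kinds above.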
+ AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/pkg/search/proxy/framework/plugins/cache/cache.go b/pkg/search/proxy/framework/plugins/cache/cache.go index a93e0fef1df1..48858fb1c811 100644 --- a/pkg/search/proxy/framework/plugins/cache/cache.go +++ b/pkg/search/proxy/framework/plugins/cache/cache.go @@ -110,7 +110,7 @@ func (c *Cache) Connect(_ context.Context, request framework.ProxyRequest) (http ClusterScoped: mapping.Scope.Name() == meta.RESTScopeNameRoot, }, Serializer: scheme.Codecs.WithoutConversion(), - Convertor: runtime.NewScheme(), + Convertor: cacheScheme, Subresource: requestInfo.Subresource, MetaGroupVersion: metav1.SchemeGroupVersion, TableConvertor: r.tableConvertor, diff --git a/pkg/search/proxy/framework/plugins/cache/scheme.go b/pkg/search/proxy/framework/plugins/cache/scheme.go new file mode 100644 index 000000000000..66b9c90e7ef1 --- /dev/null +++ b/pkg/search/proxy/framework/plugins/cache/scheme.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + corev1 "github.com/karmada-io/karmada/pkg/search/proxy/framework/plugins/cache/apis/core/v1" +) + +var cacheScheme = runtime.NewScheme() + +func init() { + AddToScheme(cacheScheme) +} + +// AddToScheme applies all the stored functions to the scheme. 
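+// It panics (via utilruntime.Must) if registration fails, which is intended:
+// the scheme is populated exactly once from init(), where a registration
+// failure is fatal by design. A minimal usage sketch (any scheme works, not
+// only the package-level cacheScheme):
+//
+//	s := runtime.NewScheme()
+//	AddToScheme(s)
+//	// s can now convert corev1 field selectors such as "spec.nodeName" on Pods.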
+func AddToScheme(scheme *runtime.Scheme) {
+	utilruntime.Must(corev1.AddToScheme(scheme))
+}
diff --git a/pkg/search/proxy/store/multi_cluster_cache.go b/pkg/search/proxy/store/multi_cluster_cache.go
index 99a9de027ff5..81b9c6f62abc 100644
--- a/pkg/search/proxy/store/multi_cluster_cache.go
+++ b/pkg/search/proxy/store/multi_cluster_cache.go
@@ -40,7 +40,7 @@ import (
 
 // Store is the cache for resources from multiple member clusters
 type Store interface {
-	UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*MultiNamespace) error
+	UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*MultiNamespace, registeredResources map[schema.GroupVersionResource]struct{}) error
 	HasResource(resource schema.GroupVersionResource) bool
 	GetResourceFromCache(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error)
 	Stop()
@@ -52,10 +52,10 @@ type Store interface {
 
 // MultiClusterCache caches resource from multi member clusters
 type MultiClusterCache struct {
-	lock            sync.RWMutex
-	cache           map[string]*clusterCache
-	cachedResources map[schema.GroupVersionResource]struct{}
-	restMapper      meta.RESTMapper
+	lock                sync.RWMutex
+	cache               map[string]*clusterCache
+	registeredResources map[schema.GroupVersionResource]struct{}
+	restMapper          meta.RESTMapper
 	// newClientFunc returns a dynamic client for member cluster apiserver
 	newClientFunc func(string) (dynamic.Interface, error)
 }
@@ -65,15 +65,15 @@ var _ Store = &MultiClusterCache{}
 // NewMultiClusterCache return a cache for resources from member clusters
 func NewMultiClusterCache(newClientFunc func(string) (dynamic.Interface, error), restMapper meta.RESTMapper) *MultiClusterCache {
 	return &MultiClusterCache{
-		restMapper:      restMapper,
-		newClientFunc:   newClientFunc,
-		cache:           map[string]*clusterCache{},
-		cachedResources: map[schema.GroupVersionResource]struct{}{},
+		restMapper:          restMapper,
+		newClientFunc:       newClientFunc,
+		cache:               map[string]*clusterCache{},
+		registeredResources: map[schema.GroupVersionResource]struct{}{},
 	}
 }
 
 // UpdateCache update cache for multi clusters
-func (c *MultiClusterCache) UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*MultiNamespace) error {
+func (c *MultiClusterCache) UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*MultiNamespace, registeredResources map[schema.GroupVersionResource]struct{}) error {
 	if klog.V(3).Enabled() {
 		start := time.Now()
 		defer func() {
@@ -106,24 +106,7 @@ func (c *MultiClusterCache) UpdateCache(resourcesByCluster map[string]map[schema
 			return err
 		}
 	}
-
-	// update cachedResource
-	newCachedResources := make(map[schema.GroupVersionResource]struct{}, len(c.cachedResources))
-	for _, resources := range resourcesByCluster {
-		for resource := range resources {
-			newCachedResources[resource] = struct{}{}
-		}
-	}
-	for resource := range c.cachedResources {
-		if _, exist := newCachedResources[resource]; !exist {
-			delete(c.cachedResources, resource)
-		}
-	}
-	for resource := range newCachedResources {
-		if _, exist := c.cachedResources[resource]; !exist {
-			c.cachedResources[resource] = struct{}{}
-		}
-	}
+	c.registeredResources = registeredResources
 	return nil
 }
@@ -137,11 +120,11 @@ func (c *MultiClusterCache) Stop() {
 	}
 }
 
-// HasResource return whether resource is cached.
+// HasResource returns whether the resource is registered.
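+// Registration is driven by the ResourceRegistry selectors collected during
+// reconcile, rather than by what has actually been cached, so a selected
+// resource can be reported before any member cluster cache exists for it.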
func (c *MultiClusterCache) HasResource(resource schema.GroupVersionResource) bool { c.lock.RLock() defer c.lock.RUnlock() - _, ok := c.cachedResources[resource] + _, ok := c.registeredResources[resource] return ok } diff --git a/pkg/search/proxy/store/multi_cluster_cache_test.go b/pkg/search/proxy/store/multi_cluster_cache_test.go index 46241434b3ed..0ecb61970f9f 100644 --- a/pkg/search/proxy/store/multi_cluster_cache_test.go +++ b/pkg/search/proxy/store/multi_cluster_cache_test.go @@ -80,8 +80,11 @@ func TestMultiClusterCache_UpdateCache(t *testing.T) { cluster1.Name: resourceSet(podGVR, nodeGVR), cluster2.Name: resourceSet(podGVR), } - - err := cache.UpdateCache(resources) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + nodeGVR: {}, + } + err := cache.UpdateCache(resources, registeredResources) if err != nil { t.Error(err) } @@ -93,7 +96,7 @@ func TestMultiClusterCache_UpdateCache(t *testing.T) { // Then test removing cluster2 and remove node cache for cluster1 err = cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: resourceSet(podGVR), - }) + }, registeredResources) if err != nil { t.Error(err) } @@ -115,7 +118,11 @@ func TestMultiClusterCache_HasResource(t *testing.T) { cluster1.Name: resourceSet(podGVR, nodeGVR), cluster2.Name: resourceSet(podGVR), } - err := cache.UpdateCache(resources) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + nodeGVR: {}, + } + err := cache.UpdateCache(resources, registeredResources) if err != nil { t.Error(err) return @@ -160,6 +167,9 @@ func TestMultiClusterCache_GetResourceFromCache(t *testing.T) { cluster1.Name: resourceSet(podGVR), cluster2.Name: resourceSet(podGVR), } + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } cluster1Client := fakedynamic.NewSimpleDynamicClient(scheme, newUnstructuredObject(podGVK, "pod11", withDefaultNamespace()), newUnstructuredObject(podGVK, "pod_conflict", withDefaultNamespace()), @@ -180,7 +190,7 @@ func TestMultiClusterCache_GetResourceFromCache(t *testing.T) { } cache := NewMultiClusterCache(newClientFunc, restMapper) defer cache.Stop() - err := cache.UpdateCache(resources) + err := cache.UpdateCache(resources, registeredResources) if err != nil { t.Error(err) return @@ -300,11 +310,15 @@ func TestMultiClusterCache_Get(t *testing.T) { return fakedynamic.NewSimpleDynamicClient(scheme), nil } cache := NewMultiClusterCache(newClientFunc, restMapper) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + nodeGVR: {}, + } defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: resourceSet(podGVR, nodeGVR), cluster2.Name: resourceSet(podGVR), - }) + }, registeredResources) if err != nil { t.Fatal(err) } @@ -440,6 +454,9 @@ func TestMultiClusterCache_Get_Namespaced(t *testing.T) { } return fakedynamic.NewSimpleDynamicClient(scheme), nil } + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } cache := NewMultiClusterCache(newClientFunc, restMapper) defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ @@ -449,7 +466,7 @@ func TestMultiClusterCache_Get_Namespaced(t *testing.T) { cluster2.Name: { podGVR: &MultiNamespace{namespaces: sets.New[string]("ns1", "ns2")}, }, - }) + }, registeredResources) if err != nil { t.Fatal(err) } @@ -569,6 +586,9 @@ func TestMultiClusterCache_List(t *testing.T) { 
newUnstructuredObject(podGVK, "pod24", withDefaultNamespace(), withResourceVersion("2004")), newUnstructuredObject(podGVK, "pod25", withDefaultNamespace(), withResourceVersion("2005")), ) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } newClientFunc := func(cluster string) (dynamic.Interface, error) { switch cluster { @@ -657,7 +677,7 @@ func TestMultiClusterCache_List(t *testing.T) { t.Run(tt.name, func(t *testing.T) { cache := NewMultiClusterCache(newClientFunc, restMapper) defer cache.Stop() - err := cache.UpdateCache(tt.resources) + err := cache.UpdateCache(tt.resources, registeredResources) if err != nil { t.Error(err) return @@ -723,12 +743,15 @@ func TestMultiClusterCache_List_CacheSourceAnnotation(t *testing.T) { } return fakedynamic.NewSimpleDynamicClient(scheme), nil } + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } cache := NewMultiClusterCache(newClientFunc, restMapper) defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: resourceSet(podGVR), cluster2.Name: resourceSet(podGVR), - }) + }, registeredResources) if err != nil { t.Error(err) return @@ -777,11 +800,14 @@ func TestMultiClusterCache_List_Namespaced(t *testing.T) { return fakedynamic.NewSimpleDynamicClient(scheme), nil } cache := NewMultiClusterCache(newClientFunc, restMapper) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: {podGVR: &MultiNamespace{namespaces: sets.New[string]("ns1")}}, cluster2.Name: {podGVR: &MultiNamespace{namespaces: sets.New[string]("ns1", "ns2", "ns3")}}, - }) + }, registeredResources) if err != nil { t.Error(err) return @@ -917,11 +943,14 @@ func TestMultiClusterCache_Watch(t *testing.T) { return fakedynamic.NewSimpleDynamicClient(scheme), nil } cache := NewMultiClusterCache(newClientFunc, restMapper) + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: resourceSet(podGVR), cluster2.Name: resourceSet(podGVR), - }) + }, registeredResources) if err != nil { t.Error(err) return @@ -1038,12 +1067,15 @@ func TestMultiClusterCache_Watch_Namespaced(t *testing.T) { } return fakedynamic.NewSimpleDynamicClient(scheme), nil } + registeredResources := map[schema.GroupVersionResource]struct{}{ + podGVR: {}, + } cache := NewMultiClusterCache(newClientFunc, restMapper) defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: {podGVR: &MultiNamespace{namespaces: sets.New[string]("ns1")}}, cluster2.Name: {podGVR: &MultiNamespace{namespaces: sets.New[string]("ns1", "ns2", "ns3")}}, - }) + }, registeredResources) if err != nil { t.Error(err) return @@ -1396,6 +1428,8 @@ func TestMultiClusterCache_fillMissingClusterResourceVersion(t *testing.T) { defer cache.Stop() err := cache.UpdateCache(map[string]map[schema.GroupVersionResource]*MultiNamespace{ cluster1.Name: resourceSet(podGVR), + }, map[schema.GroupVersionResource]struct{}{ + podGVR: {}, }) if err != nil { t.Fatal(err) diff --git a/pkg/search/proxy/testing/mock_store.go b/pkg/search/proxy/testing/mock_store.go index d73d8a6d19c3..c0fabf36f71c 100644 --- a/pkg/search/proxy/testing/mock_store.go +++ b/pkg/search/proxy/testing/mock_store.go @@ -30,7 +30,7 @@ 
import ( // MockStore is a mock for store.Store interface type MockStore struct { - UpdateCacheFunc func(resourcesByCluster map[string]map[schema.GroupVersionResource]*store.MultiNamespace) error + UpdateCacheFunc func(resourcesByCluster map[string]map[schema.GroupVersionResource]*store.MultiNamespace, registeredResources map[schema.GroupVersionResource]struct{}) error HasResourceFunc func(resource schema.GroupVersionResource) bool GetResourceFromCacheFunc func(ctx context.Context, gvr schema.GroupVersionResource, namespace, name string) (runtime.Object, string, error) StopFunc func() @@ -42,11 +42,11 @@ type MockStore struct { var _ store.Store = &MockStore{} // UpdateCache implements store.Store interface -func (c *MockStore) UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*store.MultiNamespace) error { +func (c *MockStore) UpdateCache(resourcesByCluster map[string]map[schema.GroupVersionResource]*store.MultiNamespace, registeredResources map[schema.GroupVersionResource]struct{}) error { if c.UpdateCacheFunc == nil { panic("implement me") } - return c.UpdateCacheFunc(resourcesByCluster) + return c.UpdateCacheFunc(resourcesByCluster, registeredResources) } // HasResource implements store.Store interface diff --git a/pkg/util/constants.go b/pkg/util/constants.go index 6204b76d4466..6a7b9d1d463f 100644 --- a/pkg/util/constants.go +++ b/pkg/util/constants.go @@ -20,6 +20,8 @@ import ( "time" discoveryv1 "k8s.io/api/discovery/v1" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" ) // Define labels used by karmada system. @@ -252,3 +254,26 @@ const ( // DefaultFilePerm default file perm DefaultFilePerm = 0640 ) + +var ( + // ManagedResourceLabels is the list of labels that are applied to + // resources in member clusters. + ManagedResourceLabels = []string{ + workv1alpha2.ResourceBindingPermanentIDLabel, + workv1alpha2.WorkPermanentIDLabel, + ManagedByKarmadaLabel, + } + + // ManagedResourceAnnotations is the list of annotations that are applied to + // resources in member clusters. + ManagedResourceAnnotations = []string{ + workv1alpha2.ManagedAnnotation, + workv1alpha2.ManagedLabels, + workv1alpha2.ResourceBindingNamespaceAnnotationKey, + workv1alpha2.ResourceBindingNameAnnotationKey, + workv1alpha2.ResourceTemplateUIDAnnotation, + workv1alpha2.ResourceTemplateGenerationAnnotationKey, + workv1alpha2.WorkNameAnnotation, + workv1alpha2.WorkNamespaceAnnotation, + } +) diff --git a/pkg/util/context_test.go b/pkg/util/context_test.go new file mode 100644 index 000000000000..5c601495fd44 --- /dev/null +++ b/pkg/util/context_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestContextForChannel(t *testing.T) { + tests := []struct { + name string + setup func() (chan struct{}, func(context.Context, context.CancelFunc)) + expectedDone bool + timeout time.Duration + }{ + { + name: "context is cancelled when cancel function is called", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, cancel context.CancelFunc) { + cancel() + } + }, + expectedDone: true, + timeout: time.Second, + }, + { + name: "context is cancelled when parent channel is closed", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, _ context.CancelFunc) { + close(ch) + } + }, + expectedDone: true, + timeout: time.Second, + }, + { + name: "context remains open when neither cancelled nor parent channel closed", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, _ context.CancelFunc) { + // Do nothing - context should remain open + } + }, + expectedDone: false, + timeout: 100 * time.Millisecond, + }, + { + name: "concurrent operations - cancel first, then close parent", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, cancel context.CancelFunc) { + go cancel() + go func() { + time.Sleep(50 * time.Millisecond) + close(ch) + }() + } + }, + expectedDone: true, + timeout: time.Second, + }, + { + name: "concurrent operations - close parent first, then cancel", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, cancel context.CancelFunc) { + go close(ch) + go func() { + time.Sleep(50 * time.Millisecond) + cancel() + }() + } + }, + expectedDone: true, + timeout: time.Second, + }, + { + name: "multiple cancel calls should not panic", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + return ch, func(_ context.Context, cancel context.CancelFunc) { + cancel() + cancel() // Second call should not panic + cancel() // Third call should not panic + } + }, + expectedDone: true, + timeout: time.Second, + }, + { + name: "parent channel already closed", + setup: func() (chan struct{}, func(context.Context, context.CancelFunc)) { + ch := make(chan struct{}) + close(ch) + return ch, func(_ context.Context, _ context.CancelFunc) {} + }, + expectedDone: true, + timeout: time.Second, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + parentCh, operation := tt.setup() + ctx, cancel := ContextForChannel(parentCh) + defer cancel() // Always clean up + + // Run the test operation + operation(ctx, cancel) + + // Check if context is done within timeout + select { + case <-ctx.Done(): + assert.True(t, tt.expectedDone, "context was cancelled but expected to remain open") + case <-time.After(tt.timeout): + assert.False(t, tt.expectedDone, "context remained open but expected to be cancelled") + } + }) + } +} diff --git a/pkg/util/fedinformer/handlers_test.go b/pkg/util/fedinformer/handlers_test.go new file mode 100644 index 000000000000..41ef2bb12f9c --- /dev/null +++ b/pkg/util/fedinformer/handlers_test.go @@ -0,0 +1,248 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fedinformer + +import ( + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/cache" +) + +type TestObject struct { + metav1.TypeMeta + metav1.ObjectMeta + Spec string +} + +func (in *TestObject) DeepCopyObject() runtime.Object { + return &TestObject{ + TypeMeta: in.TypeMeta, + ObjectMeta: in.ObjectMeta, + Spec: in.Spec, + } +} + +type CustomResourceEventHandler struct { + handler cache.ResourceEventHandler +} + +func (c *CustomResourceEventHandler) OnAdd(obj interface{}, isInInitialList bool) { + if h, ok := c.handler.(interface{ OnAdd(interface{}, bool) }); ok { + h.OnAdd(obj, isInInitialList) + } else { + c.handler.OnAdd(obj, false) + } +} + +func (c *CustomResourceEventHandler) OnUpdate(oldObj, newObj interface{}) { + c.handler.OnUpdate(oldObj, newObj) +} + +func (c *CustomResourceEventHandler) OnDelete(obj interface{}) { + c.handler.OnDelete(obj) +} + +func TestNewHandlerOnAllEvents(t *testing.T) { + testCases := []struct { + name string + event string + input interface{} + expected runtime.Object + }{ + { + name: "Add event", + event: "add", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-add"}, Spec: "add"}, + expected: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-add"}, Spec: "add"}, + }, + { + name: "Update event", + event: "update", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-update"}, Spec: "update"}, + expected: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-update"}, Spec: "update"}, + }, + { + name: "Delete event", + event: "delete", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-delete"}, Spec: "delete"}, + expected: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-delete"}, Spec: "delete"}, + }, + { + name: "Delete event with DeletedFinalStateUnknown", + event: "delete", + input: cache.DeletedFinalStateUnknown{Obj: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-delete-unknown"}, Spec: "delete-unknown"}}, + expected: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj-delete-unknown"}, Spec: "delete-unknown"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var calledWith runtime.Object + fn := func(obj interface{}) { + calledWith = obj.(runtime.Object) + } + + handler := &CustomResourceEventHandler{NewHandlerOnAllEvents(fn)} + + switch tc.event { + case "add": + handler.OnAdd(tc.input, false) + case "update": + oldObj := &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "old-obj"}, Spec: "old"} + handler.OnUpdate(oldObj, tc.input) + case "delete": + handler.OnDelete(tc.input) + } + + if !reflect.DeepEqual(calledWith, tc.expected) { + t.Errorf("expected %v, got %v", tc.expected, calledWith) + } + }) + } +} + +func TestNewHandlerOnEvents(t *testing.T) { + testCases := []struct { + name string + event string + }{ + {"Add event", "add"}, + {"Update event", "update"}, + {"Delete event", "delete"}, + } + + for _, tc 
:= range testCases { + t.Run(tc.name, func(t *testing.T) { + var addCalled, updateCalled, deleteCalled bool + addFunc := func(_ interface{}) { addCalled = true } + updateFunc := func(_, _ interface{}) { updateCalled = true } + deleteFunc := func(_ interface{}) { deleteCalled = true } + + handler := &CustomResourceEventHandler{NewHandlerOnEvents(addFunc, updateFunc, deleteFunc)} + + testObj := &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "test-obj"}} + + switch tc.event { + case "add": + handler.OnAdd(testObj, false) + if !addCalled { + t.Error("AddFunc was not called") + } + case "update": + handler.OnUpdate(testObj, testObj) + if !updateCalled { + t.Error("UpdateFunc was not called") + } + case "delete": + handler.OnDelete(testObj) + if !deleteCalled { + t.Error("DeleteFunc was not called") + } + } + }) + } +} + +func TestNewFilteringHandlerOnAllEvents(t *testing.T) { + testCases := []struct { + name string + event string + input interface{} + expectedAdd bool + expectedUpdate bool + expectedDelete bool + }{ + { + name: "Add passing object", + event: "add", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "passing-obj"}, Spec: "pass"}, + expectedAdd: true, + expectedUpdate: false, + expectedDelete: false, + }, + { + name: "Add failing object", + event: "add", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "failing-obj"}, Spec: "fail"}, + expectedAdd: false, + expectedUpdate: false, + expectedDelete: false, + }, + { + name: "Update to passing object", + event: "update", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "passing-obj"}, Spec: "pass"}, + expectedAdd: false, + expectedUpdate: true, + expectedDelete: false, + }, + { + name: "Update to failing object", + event: "update", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "failing-obj"}, Spec: "fail"}, + expectedAdd: false, + expectedUpdate: false, + expectedDelete: true, + }, + { + name: "Delete passing object", + event: "delete", + input: &TestObject{ObjectMeta: metav1.ObjectMeta{Name: "passing-obj"}, Spec: "pass"}, + expectedAdd: false, + expectedUpdate: false, + expectedDelete: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + var addCalled, updateCalled, deleteCalled bool + addFunc := func(_ interface{}) { addCalled = true } + updateFunc := func(_, _ interface{}) { updateCalled = true } + deleteFunc := func(_ interface{}) { deleteCalled = true } + + filterFunc := func(obj interface{}) bool { + testObj := obj.(*TestObject) + return testObj.Spec == "pass" + } + + handler := NewFilteringHandlerOnAllEvents(filterFunc, addFunc, updateFunc, deleteFunc) + + switch tc.event { + case "add": + handler.OnAdd(tc.input, false) + case "update": + handler.OnUpdate(&TestObject{Spec: "pass"}, tc.input) + case "delete": + handler.OnDelete(tc.input) + } + + if addCalled != tc.expectedAdd { + t.Errorf("AddFunc called: %v, expected: %v", addCalled, tc.expectedAdd) + } + if updateCalled != tc.expectedUpdate { + t.Errorf("UpdateFunc called: %v, expected: %v", updateCalled, tc.expectedUpdate) + } + if deleteCalled != tc.expectedDelete { + t.Errorf("DeleteFunc called: %v, expected: %v", deleteCalled, tc.expectedDelete) + } + }) + } +} diff --git a/pkg/util/fedinformer/keys/keys_test.go b/pkg/util/fedinformer/keys/keys_test.go index c9ec7178f65c..64c92b00d39d 100644 --- a/pkg/util/fedinformer/keys/keys_test.go +++ b/pkg/util/fedinformer/keys/keys_test.go @@ -73,6 +73,38 @@ var ( Name: "bar", }, } + podWithEmptyGroup = &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + 
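// APIVersion is left empty on purpose: the "resource with empty group"
+			// test case below expects key generation to fail for this object.
+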
APIVersion: "", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + }, + } + + podWithEmptyKind = &corev1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "bar", + }, + } + + secretWithEmptyNamespace = &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "", + Name: "bar", + }, + } ) func TestClusterWideKeyFunc(t *testing.T) { @@ -121,6 +153,22 @@ func TestClusterWideKeyFunc(t *testing.T) { object: deploymentObj, expectErr: true, }, + { + name: "resource with empty group", + object: podWithEmptyGroup, + expectErr: true, + }, + { + name: "resource with empty kind", + object: podWithEmptyKind, + expectErr: true, + }, + { + name: "resource with empty namespace", + object: secretWithEmptyNamespace, + expectErr: false, + expectKeyStr: "v1, kind=Secret, bar", + }, } for _, test := range tests { diff --git a/pkg/util/fedinformer/typedmanager/multi-cluster-manager_test.go b/pkg/util/fedinformer/typedmanager/multi-cluster-manager_test.go new file mode 100644 index 000000000000..a18f72fc2d71 --- /dev/null +++ b/pkg/util/fedinformer/typedmanager/multi-cluster-manager_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package typedmanager
+
+import (
+	"testing"
+	"time"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/kubernetes/fake"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/karmada-io/karmada/pkg/util/fedinformer"
+)
+
+func TestMultiClusterInformerManager(t *testing.T) {
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+
+	transforms := map[schema.GroupVersionResource]cache.TransformFunc{
+		nodeGVR: fedinformer.NodeTransformFunc,
+		podGVR:  fedinformer.PodTransformFunc,
+	}
+
+	manager := NewMultiClusterInformerManager(stopCh, transforms)
+
+	t.Run("ForCluster", func(t *testing.T) {
+		cluster := "test-cluster"
+		client := fake.NewSimpleClientset()
+		resync := 10 * time.Second
+
+		singleManager := manager.ForCluster(cluster, client, resync)
+		if singleManager == nil {
+			t.Fatalf("ForCluster() returned nil")
+		}
+
+		if !manager.IsManagerExist(cluster) {
+			t.Fatalf("IsManagerExist() returned false for existing cluster")
+		}
+	})
+
+	t.Run("GetSingleClusterManager", func(t *testing.T) {
+		cluster := "test-cluster"
+		singleManager := manager.GetSingleClusterManager(cluster)
+		if singleManager == nil {
+			t.Fatalf("GetSingleClusterManager() returned nil for existing cluster")
+		}
+
+		nonExistentCluster := "non-existent-cluster"
+		singleManager = manager.GetSingleClusterManager(nonExistentCluster)
+		if singleManager != nil {
+			t.Fatalf("GetSingleClusterManager() returned non-nil for non-existent cluster")
+		}
+	})
+
+	t.Run("Start and Stop", func(t *testing.T) {
+		cluster := "test-cluster-2"
+		client := fake.NewSimpleClientset()
+		resync := 10 * time.Second
+
+		manager.ForCluster(cluster, client, resync)
+		manager.Start(cluster)
+
+		manager.Stop(cluster)
+
+		if manager.IsManagerExist(cluster) {
+			t.Fatalf("IsManagerExist() returned true after Stop()")
+		}
+	})
+
+	t.Run("WaitForCacheSync", func(t *testing.T) {
+		cluster := "test-cluster-3"
+		client := fake.NewSimpleClientset()
+		resync := 10 * time.Millisecond
+		singleManager := manager.ForCluster(cluster, client, resync)
+		manager.Start(cluster)
+
+		_, _ = singleManager.Lister(podGVR)
+		_, _ = singleManager.Lister(nodeGVR)
+
+		time.Sleep(100 * time.Millisecond)
+
+		result := manager.WaitForCacheSync(cluster)
+		if result == nil {
+			t.Fatalf("WaitForCacheSync() returned nil result")
+		}
+
+		for gvr, synced := range result {
+			t.Logf("Resource %v synced: %v", gvr, synced)
+		}
+
+		manager.Stop(cluster)
+	})
+
+	t.Run("WaitForCacheSyncWithTimeout", func(t *testing.T) {
+		cluster := "test-cluster-4"
+		client := fake.NewSimpleClientset()
+		resync := 10 * time.Millisecond
+		singleManager := manager.ForCluster(cluster, client, resync)
+		manager.Start(cluster)
+
+		_, _ = singleManager.Lister(podGVR)
+		_, _ = singleManager.Lister(nodeGVR)
+
+		timeout := 100 * time.Millisecond
+		result := manager.WaitForCacheSyncWithTimeout(cluster, timeout)
+		if result == nil {
+			t.Fatalf("WaitForCacheSyncWithTimeout() returned nil result")
+		}
+
+		for gvr, synced := range result {
+			t.Logf("Resource %v synced: %v", gvr, synced)
+		}
+
+		manager.Stop(cluster)
+	})
+
+	t.Run("WaitForCacheSync and WaitForCacheSyncWithTimeout with non-existent cluster", func(t *testing.T) {
+		nonExistentCluster := "non-existent-cluster"
+
+		result1 := manager.WaitForCacheSync(nonExistentCluster)
+		if result1 != nil {
+			t.Fatalf("WaitForCacheSync() returned non-nil for non-existent cluster")
+		}
+
+		result2 := manager.WaitForCacheSyncWithTimeout(nonExistentCluster, 1*time.Second)
+		if result2 != nil {
+			t.Fatalf("WaitForCacheSyncWithTimeout() returned non-nil for 
non-existent cluster") + } + }) +} + +func TestGetInstance(t *testing.T) { + instance1 := GetInstance() + instance2 := GetInstance() + + if instance1 != instance2 { + t.Fatalf("GetInstance() returned different instances") + } +} + +func TestStopInstance(_ *testing.T) { + StopInstance() + // Ensure StopInstance doesn't panic + StopInstance() +} diff --git a/pkg/util/fedinformer/typedmanager/single-cluster-manager_test.go b/pkg/util/fedinformer/typedmanager/single-cluster-manager_test.go new file mode 100644 index 000000000000..257739645301 --- /dev/null +++ b/pkg/util/fedinformer/typedmanager/single-cluster-manager_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package typedmanager + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/tools/cache" +) + +func TestSingleClusterInformerManager(t *testing.T) { + client := fake.NewSimpleClientset() + stopCh := make(chan struct{}) + defer close(stopCh) + + manager := NewSingleClusterInformerManager(client, 0, stopCh, nil) + + t.Run("ForResource", func(t *testing.T) { + handler := &testResourceEventHandler{} + err := manager.ForResource(podGVR, handler) + require.NoError(t, err, "ForResource failed") + + assert.True(t, manager.IsHandlerExist(podGVR, handler), "Handler should exist for podGVR") + }) + + t.Run("Lister", func(t *testing.T) { + lister, err := manager.Lister(podGVR) + require.NoError(t, err, "Lister failed") + assert.NotNil(t, lister, "Lister should not be nil") + }) + + t.Run("Start and Stop", func(_ *testing.T) { + manager.Start() + // Sleep to allow informers to start + time.Sleep(100 * time.Millisecond) + manager.Stop() + }) + + t.Run("WaitForCacheSync", func(t *testing.T) { + manager.Start() + defer manager.Stop() + + synced := manager.WaitForCacheSync() + assert.NotEmpty(t, synced, "WaitForCacheSync should return non-empty map") + }) + + t.Run("WaitForCacheSyncWithTimeout", func(t *testing.T) { + manager.Start() + defer manager.Stop() + + synced := manager.WaitForCacheSyncWithTimeout(5 * time.Second) + assert.NotEmpty(t, synced, "WaitForCacheSyncWithTimeout should return non-empty map") + }) + + t.Run("Context", func(t *testing.T) { + ctx := manager.Context() + assert.NotNil(t, ctx, "Context should not be nil") + }) + + t.Run("GetClient", func(t *testing.T) { + c := manager.GetClient() + assert.NotNil(t, c, "GetClient should not return nil") + }) +} + +func TestSingleClusterInformerManagerWithTransformFunc(t *testing.T) { + client := fake.NewSimpleClientset() + stopCh := make(chan struct{}) + defer close(stopCh) + + transformFunc := func(i interface{}) (interface{}, error) { + return i, nil + } + + transformFuncs := map[schema.GroupVersionResource]cache.TransformFunc{ + podGVR: transformFunc, + } + + manager := NewSingleClusterInformerManager(client, 0, stopCh, transformFuncs) + + t.Run("ForResourceWithTransform", func(t 
*testing.T) { + handler := &testResourceEventHandler{} + err := manager.ForResource(podGVR, handler) + require.NoError(t, err, "ForResource with transform failed") + }) +} + +func TestSingleClusterInformerManagerMultipleHandlers(t *testing.T) { + client := fake.NewSimpleClientset() + stopCh := make(chan struct{}) + defer close(stopCh) + + manager := NewSingleClusterInformerManager(client, 0, stopCh, nil) + + handler1 := &testResourceEventHandler{} + handler2 := &testResourceEventHandler{} + + t.Run("MultipleHandlers", func(t *testing.T) { + err := manager.ForResource(podGVR, handler1) + require.NoError(t, err, "ForResource failed for handler1") + + err = manager.ForResource(podGVR, handler2) + require.NoError(t, err, "ForResource failed for handler2") + + assert.True(t, manager.IsHandlerExist(podGVR, handler1), "Handler1 should exist for podGVR") + assert.True(t, manager.IsHandlerExist(podGVR, handler2), "Handler2 should exist for podGVR") + }) +} + +func TestSingleClusterInformerManagerDifferentResources(t *testing.T) { + client := fake.NewSimpleClientset() + stopCh := make(chan struct{}) + defer close(stopCh) + + manager := NewSingleClusterInformerManager(client, 0, stopCh, nil) + + t.Run("DifferentResources", func(t *testing.T) { + podHandler := &testResourceEventHandler{} + err := manager.ForResource(podGVR, podHandler) + require.NoError(t, err, "ForResource failed for podGVR") + + nodeHandler := &testResourceEventHandler{} + err = manager.ForResource(nodeGVR, nodeHandler) + require.NoError(t, err, "ForResource failed for nodeGVR") + + assert.True(t, manager.IsHandlerExist(podGVR, podHandler), "PodHandler should exist for podGVR") + assert.True(t, manager.IsHandlerExist(nodeGVR, nodeHandler), "NodeHandler should exist for nodeGVR") + }) +} + +func TestIsInformerSynced(t *testing.T) { + client := fake.NewSimpleClientset() + stopCh := make(chan struct{}) + defer close(stopCh) + manager := NewSingleClusterInformerManager(client, 0, stopCh, nil) + + assert.False(t, manager.IsInformerSynced(podGVR)) + assert.False(t, manager.IsInformerSynced(nodeGVR)) + + handler := &testResourceEventHandler{} + err := manager.ForResource(podGVR, handler) + require.NoError(t, err) + err = manager.ForResource(nodeGVR, handler) + require.NoError(t, err) + + manager.Start() + defer manager.Stop() + + synced := manager.WaitForCacheSyncWithTimeout(5 * time.Second) + + assert.True(t, synced[podGVR], "Pod informer should be synced") + assert.True(t, synced[nodeGVR], "Node informer should be synced") + + time.Sleep(100 * time.Millisecond) + + assert.True(t, manager.IsInformerSynced(podGVR), "Pod informer should be reported as synced") + assert.True(t, manager.IsInformerSynced(nodeGVR), "Node informer should be reported as synced") +} + +type testResourceEventHandler struct{} + +func (t *testResourceEventHandler) OnAdd(_ interface{}, _ bool) {} +func (t *testResourceEventHandler) OnUpdate(_, _ interface{}) {} +func (t *testResourceEventHandler) OnDelete(_ interface{}) {} diff --git a/pkg/util/grpcconnection/config.go b/pkg/util/grpcconnection/config.go index 149d0aeefa4e..b15a850b1a04 100644 --- a/pkg/util/grpcconnection/config.go +++ b/pkg/util/grpcconnection/config.go @@ -27,6 +27,7 @@ import ( "google.golang.org/grpc" grpccredentials "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + utilerrors "k8s.io/apimachinery/pkg/util/errors" ) // ServerConfig the config of GRPC server side. 
@@ -99,11 +100,8 @@ func (s *ServerConfig) NewServer() (*grpc.Server, error) {
 	return grpc.NewServer(grpc.Creds(grpccredentials.NewTLS(config))), nil
 }
 
-// DialWithTimeOut creates a client connection to the given target.
-func (c *ClientConfig) DialWithTimeOut(path string, timeout time.Duration) (*grpc.ClientConn, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), timeout)
-	defer cancel()
-
+// DialWithTimeOut tries each of the given targets in turn and returns the first client connection established successfully; if every attempt fails, it returns the aggregated errors.
+func (c *ClientConfig) DialWithTimeOut(paths []string, timeout time.Duration) (*grpc.ClientConn, error) {
 	opts := []grpc.DialOption{
 		grpc.WithBlock(),
 	}
@@ -138,6 +136,25 @@ func (c *ClientConfig) DialWithTimeOut(path string, timeout time.Duration) (*grp
 	}
 
 	opts = append(opts, grpc.WithTransportCredentials(cred))
+
+	var cc *grpc.ClientConn
+	var err error
+	var allErrs []error
+	for _, path := range paths {
+		cc, err = createGRPCConnection(path, timeout, opts...)
+		if err == nil {
+			return cc, nil
+		}
+		allErrs = append(allErrs, err)
+	}
+
+	return nil, utilerrors.NewAggregate(allErrs)
+}
+
+func createGRPCConnection(path string, timeout time.Duration, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+
 	cc, err := grpc.DialContext(ctx, path, opts...)
 	if err != nil {
 		return nil, fmt.Errorf("dial %s error: %v", path, err)
diff --git a/pkg/util/helper/work.go b/pkg/util/helper/work.go
index c5956270e585..438a7309e8eb 100644
--- a/pkg/util/helper/work.go
+++ b/pkg/util/helper/work.go
@@ -39,7 +39,7 @@ import (
 )
 
 // CreateOrUpdateWork creates a Work object if not exist, or updates if it already exists.
-func CreateOrUpdateWork(ctx context.Context, client client.Client, workMeta metav1.ObjectMeta, resource *unstructured.Unstructured, suspendDispatching *bool) error {
+func CreateOrUpdateWork(ctx context.Context, client client.Client, workMeta metav1.ObjectMeta, resource *unstructured.Unstructured, options ...WorkOption) error {
 	if workMeta.Labels[util.PropagationInstruction] != util.PropagationInstructionSuppressed {
 		resource = resource.DeepCopy()
 		// set labels
@@ -62,7 +62,6 @@ func CreateOrUpdateWork(ctx context.Context, client client.Client, workMeta meta
 	work := &workv1alpha1.Work{
 		ObjectMeta: workMeta,
 		Spec: workv1alpha1.WorkSpec{
-			SuspendDispatching: suspendDispatching,
 			Workload: workv1alpha1.WorkloadTemplate{
 				Manifests: []workv1alpha1.Manifest{
 					{
@@ -75,6 +74,8 @@ func CreateOrUpdateWork(ctx context.Context, client client.Client, workMeta meta
 		},
 	}
 
+	applyWorkOptions(work, options)
+
 	runtimeObject := work.DeepCopy()
 	var operationResult controllerutil.OperationResult
 	err = retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
diff --git a/pkg/util/helper/workoption.go b/pkg/util/helper/workoption.go
new file mode 100644
index 000000000000..817fcc288205
--- /dev/null
+++ b/pkg/util/helper/workoption.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helper + +import workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + +// WorkOption is a function that applies changes to a Work object. +// It is used to configure Work fields for clients of CreateOrUpdateWork. +type WorkOption func(work *workv1alpha1.Work) + +// WithSuspendDispatching sets the SuspendDispatching field of the Work Spec. +func WithSuspendDispatching(suspendDispatching bool) WorkOption { + return func(work *workv1alpha1.Work) { + work.Spec.SuspendDispatching = &suspendDispatching + } +} + +// WithPreserveResourcesOnDeletion sets the PreserveResourcesOnDeletion field of the Work Spec. +func WithPreserveResourcesOnDeletion(preserveResourcesOnDeletion bool) WorkOption { + return func(work *workv1alpha1.Work) { + work.Spec.PreserveResourcesOnDeletion = &preserveResourcesOnDeletion + } +} + +func applyWorkOptions(work *workv1alpha1.Work, options []WorkOption) { + for _, option := range options { + option(work) + } +} diff --git a/pkg/util/helper/workoption_test.go b/pkg/util/helper/workoption_test.go new file mode 100644 index 000000000000..c8e798712e9f --- /dev/null +++ b/pkg/util/helper/workoption_test.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package helper + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" +) + +func TestWithSuspendDispatching(t *testing.T) { + tests := []struct { + name string + suspendDispatching bool + }{ + { + name: "WithSuspendDispatching: true", + suspendDispatching: true, + }, + { + name: "WithSuspendDispatching: false", + suspendDispatching: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + work := &workv1alpha1.Work{} + applyWorkOptions(work, []WorkOption{ + WithSuspendDispatching(tt.suspendDispatching), + }) + + assert.NotNilf(t, work.Spec.SuspendDispatching, "WithSuspendDispatching(%v)", tt.suspendDispatching) + assert.Equalf(t, tt.suspendDispatching, *work.Spec.SuspendDispatching, "WithSuspendDispatching(%v)", tt.suspendDispatching) + }) + } +} + +func TestWithPreserveResourcesOnDeletion(t *testing.T) { + tests := []struct { + name string + preserveResourcesOnDeletion bool + }{ + { + name: "PreserveResourcesOnDeletion: true", + preserveResourcesOnDeletion: true, + }, + { + name: "PreserveResourcesOnDeletion: false", + preserveResourcesOnDeletion: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + work := &workv1alpha1.Work{} + applyWorkOptions(work, []WorkOption{ + WithPreserveResourcesOnDeletion(tt.preserveResourcesOnDeletion), + }) + + assert.NotNilf(t, work.Spec.PreserveResourcesOnDeletion, "WithPreserveResourcesOnDeletion(%v)", tt.preserveResourcesOnDeletion) + assert.Equalf(t, tt.preserveResourcesOnDeletion, *work.Spec.PreserveResourcesOnDeletion, "WithPreserveResourcesOnDeletion(%v)", tt.preserveResourcesOnDeletion) + }) + } +} diff --git a/pkg/util/helper/workstatus_test.go b/pkg/util/helper/workstatus_test.go index bd5119524dc7..ced58ffe8db0 100644 --- a/pkg/util/helper/workstatus_test.go +++ b/pkg/util/helper/workstatus_test.go @@ -17,18 +17,689 @@ limitations under the License. 
package helper import ( + "context" + "strings" "testing" + "time" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" ) +func TestAggregateResourceBindingWorkStatus(t *testing.T) { + scheme := setupScheme() + + // Helper functions to create binding + createBinding := func(name, bindingID string, clusters []string) *workv1alpha2.ResourceBinding { + return &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: name, + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: bindingID, + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + Clusters: func() []workv1alpha2.TargetCluster { + var targetClusters []workv1alpha2.TargetCluster + for _, cluster := range clusters { + targetClusters = append(targetClusters, workv1alpha2.TargetCluster{Name: cluster}) + } + return targetClusters + }(), + }, + } + } + + // Helper functions to create work + createWork := func(name, namespace, bindingID string, applied bool, withStatus bool) *workv1alpha1.Work { + work := &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: bindingID, + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"test-deployment","namespace":"default"}}`), + }, + }, + }, + }, + }, + } + + if withStatus { + status := metav1.ConditionTrue + if !applied { + status = metav1.ConditionFalse + } + work.Status = workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: status, + Message: "test message", + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Identifier: workv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + Namespace: "default", + }, + Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3}`)}, + Health: "Healthy", + }, + }, + } + } + return work + } + + tests := []struct { + name string + bindingID string + binding *workv1alpha2.ResourceBinding + works []*workv1alpha1.Work + expectedError bool + expectedStatus metav1.ConditionStatus + expectedApplied bool + expectedEvent string + }{ + { + name: "successful single work aggregation", + bindingID: "test-id-1", + binding: createBinding("binding-1", "test-id-1", []string{"member1"}), + works: []*workv1alpha1.Work{ + createWork("work-1", "karmada-es-member1", "test-id-1", true, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionTrue, + expectedApplied: true, + expectedEvent: "Update ResourceBinding(default/binding-1) with AggregatedStatus successfully", + }, + { + name: "work not found", + bindingID: "test-id-2", + binding: 
createBinding("binding-2", "test-id-2", []string{"member1"}), + works: []*workv1alpha1.Work{}, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + { + name: "work not applied", + bindingID: "test-id-3", + binding: createBinding("binding-3", "test-id-3", []string{"member1"}), + works: []*workv1alpha1.Work{ + createWork("work-3", "karmada-es-member1", "test-id-3", false, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + { + name: "multiple works for different clusters", + bindingID: "test-id-4", + binding: createBinding("binding-4", "test-id-4", []string{"member1", "member2"}), + works: []*workv1alpha1.Work{ + createWork("work-4-1", "karmada-es-member1", "test-id-4", true, true), + createWork("work-4-2", "karmada-es-member2", "test-id-4", true, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionTrue, + expectedApplied: true, + }, + { + name: "work without status", + bindingID: "test-id-5", + binding: createBinding("binding-5", "test-id-5", []string{"member1"}), + works: []*workv1alpha1.Work{ + createWork("work-5", "karmada-es-member1", "test-id-5", true, false), + }, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objects := []client.Object{tt.binding} + for _, work := range tt.works { + objects = append(objects, work) + } + + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(tt.binding). + Build() + + recorder := record.NewFakeRecorder(10) + + err := AggregateResourceBindingWorkStatus(context.TODO(), c, tt.binding, recorder) + if tt.expectedError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + // Verify updated binding + updatedBinding := &workv1alpha2.ResourceBinding{} + err = c.Get(context.TODO(), client.ObjectKey{Namespace: tt.binding.Namespace, Name: tt.binding.Name}, updatedBinding) + assert.NoError(t, err) + + // Verify conditions + fullyAppliedCond := meta.FindStatusCondition(updatedBinding.Status.Conditions, workv1alpha2.FullyApplied) + if assert.NotNil(t, fullyAppliedCond) { + assert.Equal(t, tt.expectedStatus, fullyAppliedCond.Status) + } + + // Verify aggregated status + if tt.works != nil && len(tt.works) > 0 { + assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works)) + for _, status := range updatedBinding.Status.AggregatedStatus { + assert.Equal(t, tt.expectedApplied, status.Applied) + } + } + + // Verify events if expected + if tt.expectedEvent != "" { + select { + case event := <-recorder.Events: + assert.Contains(t, event, tt.expectedEvent) + default: + t.Error("Expected event not received") + } + } + }) + } +} + +func TestAggregateClusterResourceBindingWorkStatus(t *testing.T) { + scheme := setupScheme() + + // Helper function to create cluster binding + createClusterBinding := func(name, bindingID string, clusters []string) *workv1alpha2.ClusterResourceBinding { + return &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{ + workv1alpha2.ClusterResourceBindingPermanentIDLabel: bindingID, + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Clusters: func() []workv1alpha2.TargetCluster { + var targetClusters []workv1alpha2.TargetCluster + for _, cluster := range 
clusters { + targetClusters = append(targetClusters, workv1alpha2.TargetCluster{Name: cluster}) + } + return targetClusters + }(), + }, + } + } + + // Helper function to create work + createWork := func(name, namespace, bindingID string, applied bool, withStatus bool) *workv1alpha1.Work { + workObj := &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: map[string]string{ + workv1alpha2.ClusterResourceBindingPermanentIDLabel: bindingID, + }, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"test-deployment"}}`), + }, + }, + }, + }, + }, + } + + if withStatus { + status := metav1.ConditionTrue + if !applied { + status = metav1.ConditionFalse + } + workObj.Status = workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: status, + Message: "test message", + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Identifier: workv1alpha1.ResourceIdentifier{ + Group: "apps", + Version: "v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Status: &runtime.RawExtension{Raw: []byte(`{"replicas": 3}`)}, + Health: "Healthy", + }, + }, + } + } + return workObj + } + + tests := []struct { + name string + bindingID string + binding *workv1alpha2.ClusterResourceBinding + works []*workv1alpha1.Work + expectedError bool + expectedStatus metav1.ConditionStatus + expectedApplied bool + expectedEvent string + }{ + { + name: "successful single work aggregation", + bindingID: "test-id-1", + binding: createClusterBinding("binding-1", "test-id-1", []string{"member1"}), + works: []*workv1alpha1.Work{ + createWork("work-1", "karmada-es-member1", "test-id-1", true, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionTrue, + expectedApplied: true, + expectedEvent: "Update ClusterResourceBinding(binding-1) with AggregatedStatus successfully", + }, + { + name: "no works found", + bindingID: "test-id-2", + binding: createClusterBinding("binding-2", "test-id-2", []string{"member1"}), + works: []*workv1alpha1.Work{}, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + { + name: "work not applied", + bindingID: "test-id-3", + binding: createClusterBinding("binding-3", "test-id-3", []string{"member1"}), + works: []*workv1alpha1.Work{ + createWork("work-3", "karmada-es-member1", "test-id-3", false, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + { + name: "multiple clusters with mixed status", + bindingID: "test-id-4", + binding: createClusterBinding("binding-4", "test-id-4", []string{"member1", "member2"}), + works: []*workv1alpha1.Work{ + createWork("work-4-1", "karmada-es-member1", "test-id-4", true, true), + createWork("work-4-2", "karmada-es-member2", "test-id-4", false, true), + }, + expectedError: false, + expectedStatus: metav1.ConditionFalse, + expectedApplied: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objects := []client.Object{tt.binding} + for _, work := range tt.works { + objects = append(objects, work) + } + + c := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + WithStatusSubresource(tt.binding). 
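+				// WithStatusSubresource lets the fake client accept status
+				// updates issued through client.Status(), mirroring how the
+				// API server treats status as a subresource.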
+ Build() + + recorder := record.NewFakeRecorder(10) + + err := AggregateClusterResourceBindingWorkStatus(context.TODO(), c, tt.binding, recorder) + if tt.expectedError { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + // Verify updated binding + updatedBinding := &workv1alpha2.ClusterResourceBinding{} + err = c.Get(context.TODO(), client.ObjectKey{Name: tt.binding.Name}, updatedBinding) + assert.NoError(t, err) + + // Verify conditions + fullyAppliedCond := meta.FindStatusCondition(updatedBinding.Status.Conditions, workv1alpha2.FullyApplied) + if assert.NotNil(t, fullyAppliedCond) { + assert.Equal(t, tt.expectedStatus, fullyAppliedCond.Status) + } + + // Verify aggregated status + if tt.works != nil && len(tt.works) > 0 { + assert.Len(t, updatedBinding.Status.AggregatedStatus, len(tt.works)) + // For multiple clusters case, verify specific cluster status + for _, status := range updatedBinding.Status.AggregatedStatus { + if strings.Contains(status.ClusterName, "member2") { + assert.Equal(t, tt.expectedApplied, status.Applied) + } + } + } + + // Verify events + if tt.expectedEvent != "" { + select { + case event := <-recorder.Events: + assert.Contains(t, event, tt.expectedEvent) + default: + t.Error("Expected event not received") + } + } + }) + } +} + +func TestBuildStatusRawExtension(t *testing.T) { + // Use a fixed time for deterministic tests + fixedTime := metav1.NewTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)) + + tests := []struct { + name string + status interface{} + wantErr bool + expectedRaw string + }{ + { + name: "simple pod status", + status: corev1.PodStatus{ + Phase: corev1.PodRunning, + }, + wantErr: false, + expectedRaw: `{"phase":"Running"}`, + }, + { + name: "complex pod status", + status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + LastProbeTime: fixedTime, + LastTransitionTime: fixedTime, + }, + }, + }, + wantErr: false, + expectedRaw: `{"phase":"Running","conditions":[{"type":"Ready","status":"True","lastProbeTime":"2024-01-01T00:00:00Z","lastTransitionTime":"2024-01-01T00:00:00Z"}]}`, + }, + { + name: "nil status", + status: nil, + wantErr: false, + expectedRaw: `null`, + }, + { + name: "unmarshallable status", + status: make(chan int), + wantErr: true, + expectedRaw: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := BuildStatusRawExtension(tt.status) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + return + } + + assert.NoError(t, err) + assert.NotNil(t, got) + assert.JSONEq(t, tt.expectedRaw, string(got.Raw)) + }) + } +} + +func TestAssembleWorkStatus(t *testing.T) { + // Common test data + statusRaw := []byte(`{"replicas": 3}`) + baseManifest := workv1alpha1.Manifest{ + RawExtension: runtime.RawExtension{ + Raw: []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"test-deployment","namespace":"test-ns"}}`), + }, + } + + baseObjRef := workv1alpha2.ObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Namespace: "test-ns", + Name: "test-deployment", + } + + // Helper function to create a basic work + createWork := func(name string, manifest workv1alpha1.Manifest) workv1alpha1.Work { + return workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "karmada-es-member1", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{manifest}, + }, + }, + } + } + + tests := []struct 
{ + name string + works []workv1alpha1.Work + objRef workv1alpha2.ObjectReference + expectedItems []workv1alpha2.AggregatedStatusItem + wantErr bool + errorMessage string + }{ + { + name: "empty work list", + works: []workv1alpha1.Work{}, + objRef: baseObjRef, + expectedItems: []workv1alpha2.AggregatedStatusItem{}, + wantErr: false, + }, + { + name: "work with invalid manifest", + works: []workv1alpha1.Work{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-work", + Namespace: "karmada-es-member1", + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{Raw: []byte(`invalid json`)}, + }, + }, + }, + }, + }, + }, + objRef: baseObjRef, + wantErr: true, + }, + { + name: "work being deleted", + works: []workv1alpha1.Work{ + func() workv1alpha1.Work { + w := createWork("test-work", baseManifest) + now := metav1.NewTime(time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)) + w.DeletionTimestamp = &now + return w + }(), + }, + objRef: baseObjRef, + expectedItems: []workv1alpha2.AggregatedStatusItem{}, + wantErr: false, + }, + { + name: "work applied successfully with health status", + works: []workv1alpha1.Work{ + func() workv1alpha1.Work { + w := createWork("test-work", baseManifest) + w.Status = workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionTrue, + }, + }, + ManifestStatuses: []workv1alpha1.ManifestStatus{ + { + Identifier: workv1alpha1.ResourceIdentifier{ + Ordinal: 0, + Group: "apps", + Version: "v1", + Kind: "Deployment", + Namespace: "test-ns", + Name: "test-deployment", + }, + Status: &runtime.RawExtension{Raw: statusRaw}, + Health: "Healthy", + }, + }, + } + return w + }(), + }, + objRef: baseObjRef, + expectedItems: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "member1", + Status: &runtime.RawExtension{Raw: statusRaw}, + Applied: true, + Health: workv1alpha2.ResourceHealthy, + }, + }, + wantErr: false, + }, + { + name: "work not applied with error message", + works: []workv1alpha1.Work{ + func() workv1alpha1.Work { + w := createWork("test-work", baseManifest) + w.Status = workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionFalse, + Message: "Failed to apply", + }, + }, + } + return w + }(), + }, + objRef: baseObjRef, + expectedItems: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "member1", + Applied: false, + AppliedMessage: "Failed to apply", + Health: workv1alpha2.ResourceUnknown, + }, + }, + wantErr: false, + }, + { + name: "work with unknown condition status", + works: []workv1alpha1.Work{ + func() workv1alpha1.Work { + w := createWork("test-work", baseManifest) + w.Status = workv1alpha1.WorkStatus{ + Conditions: []metav1.Condition{ + { + Type: workv1alpha1.WorkApplied, + Status: metav1.ConditionUnknown, + Message: "Status unknown", + }, + }, + } + return w + }(), + }, + objRef: baseObjRef, + expectedItems: []workv1alpha2.AggregatedStatusItem{ + { + ClusterName: "member1", + Applied: false, + AppliedMessage: "Status unknown", + Health: workv1alpha2.ResourceUnknown, + }, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := assembleWorkStatus(tt.works, tt.objRef) + if tt.wantErr { + assert.Error(t, err) + if tt.errorMessage != "" { + assert.Contains(t, err.Error(), tt.errorMessage) + } + return + } + assert.NoError(t, err) + assert.Equal(t, tt.expectedItems, got) + 
}) + } +} + func TestGenerateFullyAppliedCondition(t *testing.T) { spec := workv1alpha2.ResourceBindingSpec{ Clusters: []workv1alpha2.TargetCluster{ @@ -45,14 +716,10 @@ func TestGenerateFullyAppliedCondition(t *testing.T) { expectedFalse := metav1.ConditionFalse resultTrue := generateFullyAppliedCondition(spec, statuses) - if resultTrue.Status != expectedTrue { - t.Errorf("generateFullyAppliedCondition with fully applied statuses returned %v, expected %v", resultTrue, expectedTrue) - } + assert.Equal(t, expectedTrue, resultTrue.Status, "generateFullyAppliedCondition with fully applied statuses") resultFalse := generateFullyAppliedCondition(spec, statuses[:1]) - if resultFalse.Status != expectedFalse { - t.Errorf("generateFullyAppliedCondition with partially applied statuses returned %v, expected %v", resultFalse, expectedFalse) - } + assert.Equal(t, expectedFalse, resultFalse.Status, "generateFullyAppliedCondition with partially applied statuses") } func TestWorksFullyApplied(t *testing.T) { @@ -149,9 +816,8 @@ func TestWorksFullyApplied(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := worksFullyApplied(tt.args.aggregatedStatuses, tt.args.targetClusters); got != tt.want { - t.Errorf("worksFullyApplied() = %v, want %v", got, tt.want) - } + got := worksFullyApplied(tt.args.aggregatedStatuses, tt.args.targetClusters) + assert.Equal(t, tt.want, got, "worksFullyApplied() result") }) } } @@ -195,31 +861,21 @@ func TestGetManifestIndex(t *testing.T) { manifestRef := ManifestReference{APIVersion: service.GetAPIVersion(), Kind: service.GetKind(), Namespace: service.GetNamespace(), Name: service.GetName()} index, err := GetManifestIndex(manifests, &manifestRef) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if index != 0 { - t.Errorf("expected index 0, got %d", index) - } + assert.NoError(t, err) + assert.Equal(t, 0, index, "Service manifest index") }) t.Run("Deployment", func(t *testing.T) { manifestRef := ManifestReference{APIVersion: deployment.GetAPIVersion(), Kind: deployment.GetKind(), Namespace: deployment.GetNamespace(), Name: deployment.GetName()} index, err := GetManifestIndex(manifests, &manifestRef) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if index != 1 { - t.Errorf("expected index 1, got %d", index) - } + assert.NoError(t, err) + assert.Equal(t, 1, index, "Deployment manifest index") }) t.Run("No match", func(t *testing.T) { _, err := GetManifestIndex(manifests, &ManifestReference{}) - if err == nil { - t.Errorf("expected error, got nil") - } + assert.Error(t, err, "Expected error for no match") }) } @@ -274,12 +930,8 @@ func TestEqualIdentifier(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { equal, err := equalIdentifier(tc.target, tc.ordinal, tc.workload) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if equal != tc.expectedOutput { - t.Errorf("expected %v, got %v", tc.expectedOutput, equal) - } + assert.NoError(t, err) + assert.Equal(t, tc.expectedOutput, equal, "equalIdentifier() result") }) } } @@ -298,3 +950,15 @@ func TestIsResourceApplied(t *testing.T) { // Call IsResourceApplied and assert that it returns true assert.True(t, IsResourceApplied(workStatus)) } + +// Helper Functions + +// setupScheme initializes a new scheme +func setupScheme() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = workv1alpha1.Install(scheme) + _ = workv1alpha2.Install(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + return scheme +} 
diff --git a/pkg/util/lifted/corev1printers_test.go b/pkg/util/lifted/corev1printers_test.go index 4f959e9f0812..623c4a60ff00 100644 --- a/pkg/util/lifted/corev1printers_test.go +++ b/pkg/util/lifted/corev1printers_test.go @@ -19,15 +19,1429 @@ package lifted import ( "fmt" "reflect" + "strings" "testing" + "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/diff" "github.com/karmada-io/karmada/pkg/printers" ) +// MockPrintHandler is a mock implementation of printers.PrintHandler +type MockPrintHandler struct { + mock.Mock +} + +func (m *MockPrintHandler) TableHandler(columnDefinitions []metav1.TableColumnDefinition, printFunc interface{}) error { + args := m.Called(columnDefinitions, printFunc) + return args.Error(0) +} + +func TestAddCoreV1Handlers(t *testing.T) { + testCases := []struct { + name string + expectedCalls int + columnChecks map[string][]string + printFuncTypes map[string]reflect.Type + }{ + { + name: "Verify handlers are added correctly", + expectedCalls: 4, + columnChecks: map[string][]string{ + "Pod": {"Name", "Ready", "Status", "Restarts", "Age", "IP", "Node", "Nominated Node", "Readiness Gates"}, + "Node": {"Name", "Status", "Roles", "Age", "Version", "Internal-IP", "External-IP", "OS-Image", "Kernel-Version", "Container-Runtime"}, + }, + printFuncTypes: map[string]reflect.Type{ + "PodList": reflect.TypeOf(func(*corev1.PodList, printers.GenerateOptions) ([]metav1.TableRow, error) { return nil, nil }), + "Pod": reflect.TypeOf(func(*corev1.Pod, printers.GenerateOptions) ([]metav1.TableRow, error) { return nil, nil }), + "Node": reflect.TypeOf(func(*corev1.Node, printers.GenerateOptions) ([]metav1.TableRow, error) { return nil, nil }), + "NodeList": reflect.TypeOf(func(*corev1.NodeList, printers.GenerateOptions) ([]metav1.TableRow, error) { return nil, nil }), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mockHandler := &MockPrintHandler{} + mockHandler.On("TableHandler", mock.Anything, mock.Anything).Return(nil) + + AddCoreV1Handlers(mockHandler) + + assert.Equal(t, tc.expectedCalls, len(mockHandler.Calls)) + + for i, call := range mockHandler.Calls { + columnDefinitions := call.Arguments[0].([]metav1.TableColumnDefinition) + printFunc := call.Arguments[1] + + resourceType := "" + switch i { + case 0: + resourceType = "PodList" + case 1: + resourceType = "Pod" + case 2: + resourceType = "Node" + case 3: + resourceType = "NodeList" + } + + // Check column definitions + if expectedColumns, ok := tc.columnChecks[strings.TrimSuffix(resourceType, "List")]; ok { + assert.Equal(t, len(expectedColumns), len(columnDefinitions)) + for j, name := range expectedColumns { + assert.Equal(t, name, columnDefinitions[j].Name) + assert.NotEmpty(t, columnDefinitions[j].Type) + assert.NotEmpty(t, columnDefinitions[j].Description) + } + } + + // Check print function type + if expectedType, ok := tc.printFuncTypes[resourceType]; ok { + assert.Equal(t, expectedType, reflect.TypeOf(printFunc)) + } + } + + mockHandler.AssertExpectations(t) + }) + } +} + +func TestPrintNode(t *testing.T) { + testCases := []struct { + name string + node *corev1.Node + options printers.GenerateOptions + expectedChecks func(*testing.T, []metav1.TableRow) + }{ + { + name: "Basic ready node", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + CreationTimestamp: metav1.Time{Time: 
time.Now().Add(-24 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.20.0", + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "node1", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "Ready") + assert.NotEmpty(t, rows[0].Cells[3]) // Only check it's not empty due to time dependency + assert.Equal(t, "v1.20.0", rows[0].Cells[4]) + }, + }, + { + name: "Node with roles", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-48 * time.Hour)}, + Labels: map[string]string{ + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/worker": "", + }, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.21.0", + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "node2", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "Ready") + // Use Contains for roles as the order might not be guaranteed + assert.Contains(t, rows[0].Cells[2], "control-plane") + assert.Contains(t, rows[0].Cells[2], "worker") + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.21.0", rows[0].Cells[4]) + }, + }, + { + name: "Unschedulable node", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node3", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-72 * time.Hour)}, + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.22.0", + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "node3", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "SchedulingDisabled") + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.22.0", rows[0].Cells[4]) + }, + }, + { + name: "Node with missing OS, kernel, and container runtime info", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node6", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-144 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.25.0", + // OSImage, KernelVersion, and ContainerRuntimeVersion are intentionally left empty + }, + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeInternalIP, Address: "192.168.1.2"}, + {Type: corev1.NodeExternalIP, Address: "203.0.113.2"}, + }, + }, + }, + options: printers.GenerateOptions{Wide: true}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 10) // Wide option should produce 10 columns + assert.Equal(t, "node6", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "Ready") + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.25.0", rows[0].Cells[4]) 
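+				// Missing NodeSystemInfo fields should surface as empty
+				// cells in wide output rather than placeholder text, as the
+				// assertions below verify.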
+ assert.Equal(t, "192.168.1.2", rows[0].Cells[5]) // Internal IP + assert.Equal(t, "203.0.113.2", rows[0].Cells[6]) // External IP + assert.Equal(t, "", rows[0].Cells[7]) // OSImage + assert.Equal(t, "", rows[0].Cells[8]) // KernelVersion + assert.Equal(t, "", rows[0].Cells[9]) // ContainerRuntimeVersion + }, + }, + { + name: "Node with wide option", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node4", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-96 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.23.0", + OSImage: "Ubuntu 20.04", + KernelVersion: "5.4.0-42-generic", + ContainerRuntimeVersion: "docker://19.03.8", + }, + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeInternalIP, Address: "192.168.1.1"}, + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + }, + }, + }, + options: printers.GenerateOptions{Wide: true}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 10) // Wide option should produce 10 columns + assert.Equal(t, "node4", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "Ready") + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.23.0", rows[0].Cells[4]) + assert.Equal(t, "192.168.1.1", rows[0].Cells[5]) // Internal IP + assert.Equal(t, "203.0.113.1", rows[0].Cells[6]) // External IP + assert.Equal(t, "Ubuntu 20.04", rows[0].Cells[7]) + assert.Equal(t, "5.4.0-42-generic", rows[0].Cells[8]) + assert.Equal(t, "docker://19.03.8", rows[0].Cells[9]) + }, + }, + { + name: "Node with no conditions", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node5", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-120 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.24.0", + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "node5", rows[0].Cells[0]) + assert.Equal(t, "Unknown", rows[0].Cells[1]) + assert.Equal(t, "", rows[0].Cells[2]) + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.24.0", rows[0].Cells[4]) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rows, err := printNode(tc.node, tc.options) + assert.NoError(t, err) + + // Run the custom checks for this test case + tc.expectedChecks(t, rows) + + // Ensure the original node object is included in the output + assert.Equal(t, runtime.RawExtension{Object: tc.node}, rows[0].Object) + }) + } +} + +func TestGetNodeExternalIP(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + expected string + }{ + { + name: "Node with external IP", + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + {Type: corev1.NodeInternalIP, Address: "192.168.1.1"}, + }, + }, + }, + expected: "203.0.113.1", + }, + { + name: "Node without external IP", + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeInternalIP, Address: "192.168.1.1"}, + }, + }, + }, + expected: "", + }, + { + name: "Node with no addresses", + node: &corev1.Node{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := 
getNodeExternalIP(tt.node) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetNodeInternalIP(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + expected string + }{ + { + name: "Node with internal IP", + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + {Type: corev1.NodeInternalIP, Address: "192.168.1.1"}, + }, + }, + }, + expected: "192.168.1.1", + }, + { + name: "Node without internal IP", + node: &corev1.Node{ + Status: corev1.NodeStatus{ + Addresses: []corev1.NodeAddress{ + {Type: corev1.NodeExternalIP, Address: "203.0.113.1"}, + }, + }, + }, + expected: "", + }, + { + name: "Node with no addresses", + node: &corev1.Node{}, + expected: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getNodeInternalIP(tt.node) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestFindNodeRoles(t *testing.T) { + tests := []struct { + name string + node *corev1.Node + expected []string + }{ + { + name: "Node with single role", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "node-role.kubernetes.io/control-plane": "", + }, + }, + }, + expected: []string{"control-plane"}, + }, + { + name: "Node with multiple roles", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "node-role.kubernetes.io/control-plane": "", + "node-role.kubernetes.io/worker": "", + }, + }, + }, + expected: []string{"control-plane", "worker"}, + }, + { + name: "Node with kubernetes.io/role label", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "kubernetes.io/role": "special", + }, + }, + }, + expected: []string{"special"}, + }, + { + name: "Node with no role labels", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + }, + }, + expected: []string{}, + }, + { + name: "Node with special characters in role names", + node: &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "node-role.kubernetes.io/role-with-hyphen": "", + "node-role.kubernetes.io/role_with_underscore": "", + }, + }, + }, + expected: []string{"role-with-hyphen", "role_with_underscore"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := findNodeRoles(tt.node) + assert.ElementsMatch(t, tt.expected, result) // Order of roles can not be determined + }) + } +} + +func TestPrintNodeList(t *testing.T) { + testCases := []struct { + name string + nodeList *corev1.NodeList + options printers.GenerateOptions + expected func(*testing.T, []metav1.TableRow) + }{ + { + name: "Empty node list", + nodeList: &corev1.NodeList{}, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Empty(t, rows) + }, + }, + { + name: "Single node", + nodeList: &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-24 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.20.0", + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "node1", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], 
"Ready") + assert.NotEmpty(t, rows[0].Cells[3]) + assert.Equal(t, "v1.20.0", rows[0].Cells[4]) + }, + }, + { + name: "Multiple nodes with different states", + nodeList: &corev1.NodeList{ + Items: []corev1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-24 * time.Hour)}, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionTrue}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.20.0", + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-48 * time.Hour)}, + }, + Spec: corev1.NodeSpec{ + Unschedulable: true, + }, + Status: corev1.NodeStatus{ + Conditions: []corev1.NodeCondition{ + {Type: corev1.NodeReady, Status: corev1.ConditionFalse}, + }, + NodeInfo: corev1.NodeSystemInfo{ + KubeletVersion: "v1.21.0", + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 2) + assert.Equal(t, "node1", rows[0].Cells[0]) + assert.Contains(t, rows[0].Cells[1], "Ready") + assert.Equal(t, "v1.20.0", rows[0].Cells[4]) + assert.Equal(t, "node2", rows[1].Cells[0]) + assert.Contains(t, rows[1].Cells[1], "NotReady") + assert.Contains(t, rows[1].Cells[1], "SchedulingDisabled") + assert.Equal(t, "v1.21.0", rows[1].Cells[4]) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rows, err := printNodeList(tc.nodeList, tc.options) + assert.NoError(t, err) + tc.expected(t, rows) + }) + } +} + +func TestPrintPodList(t *testing.T) { + testCases := []struct { + name string + podList *corev1.PodList + options printers.GenerateOptions + expected func(*testing.T, []metav1.TableRow) + }{ + { + name: "Empty pod list", + podList: &corev1.PodList{}, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Empty(t, rows) + }, + }, + { + name: "Single running pod", + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "container1"}, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "pod1", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.NotEmpty(t, rows[0].Cells[4]) + }, + }, + { + name: "Multiple pods with different states", + podList: &corev1.PodList{ + Items: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod1", + Namespace: "default", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "container1"}, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "pod2", + Namespace: "kube-system", + CreationTimestamp: metav1.Time{Time: 
time.Now().Add(-2 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "container2"}, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + ContainerStatuses: []corev1.ContainerStatus{ + { + Ready: false, + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}, + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expected: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 2) + assert.Equal(t, "pod1", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.NotEmpty(t, rows[0].Cells[4]) + assert.Equal(t, "pod2", rows[1].Cells[0]) + assert.Equal(t, "0/1", rows[1].Cells[1]) + assert.Contains(t, []string{"Pending", "ContainerCreating"}, rows[1].Cells[2]) + assert.NotEmpty(t, rows[1].Cells[4]) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rows, err := printPodList(tc.podList, tc.options) + assert.NoError(t, err) + tc.expected(t, rows) + }) + } +} + +func TestPrintPod(t *testing.T) { + testCases := []struct { + name string + pod *corev1.Pod + options printers.GenerateOptions + expectedChecks func(*testing.T, []metav1.TableRow) + }{ + { + name: "Running pod", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "running-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "running-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+[hm]`, rows[0].Cells[4]) + }, + }, + { + name: "Pending pod with ContainerCreating", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "pending-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-30 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: false, State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}}, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "pending-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "ContainerCreating", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+m`, rows[0].Cells[4]) + }, + }, + { + name: "Succeeded pod", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "succeeded-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-3 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodSucceeded, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + 
assert.Equal(t, "succeeded-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Succeeded", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+h`, rows[0].Cells[4]) + assert.Equal(t, podSuccessConditions, rows[0].Conditions) + }, + }, + { + name: "Failed pod", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "failed-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: false, State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{Reason: "Error"}}}, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "failed-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Error", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `(\d+h|\d+m)`, rows[0].Cells[4]) // Match either hours or minutes + assert.Equal(t, podFailedConditions, rows[0].Conditions) + }, + }, + { + name: "Pod with multiple containers and restarts", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-container-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-3 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}, {Name: "container2"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true, RestartCount: 2, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}, + {Ready: true, RestartCount: 1, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "multi-container-pod", rows[0].Cells[0]) + assert.Equal(t, "2/2", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.Equal(t, int64(3), rows[0].Cells[3]) + assert.Regexp(t, `\d+h`, rows[0].Cells[4]) + }, + }, + { + name: "Pod with readiness gates", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "readiness-gate-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-4 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + ReadinessGates: []corev1.PodReadinessGate{ + {ConditionType: "custom-condition"}, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}, + }, + Conditions: []corev1.PodCondition{ + {Type: "custom-condition", Status: corev1.ConditionTrue}, + }, + }, + }, + options: printers.GenerateOptions{Wide: true}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 9) + assert.Equal(t, "readiness-gate-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+h`, rows[0].Cells[4]) + assert.Equal(t, "", rows[0].Cells[5]) // IP + assert.Equal(t, "", rows[0].Cells[6]) // Node + assert.Equal(t, "", rows[0].Cells[7]) 
// Nominated Node + assert.Equal(t, "1/1", rows[0].Cells[8]) // Readiness Gates + }, + }, + { + name: "Pod with init container - waiting", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "init-pod-waiting", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{Name: "init-container"}}, + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container", + Ready: false, + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{Reason: "ContainerCreating"}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "init-pod-waiting", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Init:ContainerCreating", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+[hm]`, rows[0].Cells[4]) + }, + }, + { + name: "Terminating pod", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "terminating-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-6 * time.Hour)}, + DeletionTimestamp: &metav1.Time{Time: time.Now().Add(-5 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + {Ready: true, State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}}, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Len(t, rows[0].Cells, 5) + assert.Equal(t, "terminating-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Terminating", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+h`, rows[0].Cells[4]) + }, + }, + { + name: "Node unreachable pod", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "unreachable-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-7 * time.Hour)}, + DeletionTimestamp: &metav1.Time{Time: time.Now().Add(-10 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Reason: NodeUnreachablePodReason, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "unreachable-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Unknown", rows[0].Cells[2]) + assert.Equal(t, int64(0), rows[0].Cells[3]) + assert.Regexp(t, `\d+h`, rows[0].Cells[4]) + }, + }, + { + name: "Pod with init container terminated with signal", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "init-signal-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-30 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{Name: "init-container"}}, + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Signal: 9, + }, 
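+							// ExitCode is left at its zero value here: the
+							// printer logic under test skips init containers
+							// that terminated with exit code 0, which is why
+							// the expected status below is "Pending" rather
+							// than "Init:Signal:9".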
+ }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "init-signal-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Pending", rows[0].Cells[2]) + }, + }, + { + name: "Pod with init container terminated with exit code", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "init-exit-code-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-35 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{Name: "init-container"}}, + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "init-exit-code-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Init:ExitCode:1", rows[0].Cells[2]) + }, + }, + { + name: "Pod with init container terminated with reason", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "init-reason-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-40 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{{Name: "init-container"}}, + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Reason: "Error", + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "init-reason-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Pending", rows[0].Cells[2]) + }, + }, + { + name: "Pod with multiple init containers, some pending", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-init-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-45 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + {Name: "init-container-1"}, + {Name: "init-container-2"}, + {Name: "init-container-3"}, + }, + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: "init-container-1", + Ready: true, + State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{ExitCode: 0}}, + }, + { + Name: "init-container-2", + Ready: false, + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{}}, + }, + { + Name: "init-container-3", + Ready: false, + State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "multi-init-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Init:1/3", rows[0].Cells[2]) + }, + }, + { + name: "Pod with container terminated with signal", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "terminated-signal-pod", 
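+					// Unlike the init-container cases above, a main
+					// container terminated by a signal is surfaced as
+					// "Signal:<n>" even with a zero exit code, as the
+					// assertions below expect.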
+ CreationTimestamp: metav1.Time{Time: time.Now().Add(-1 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Signal: 15, + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "terminated-signal-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Signal:15", rows[0].Cells[2]) + }, + }, + { + name: "Pod with container terminated with exit code", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "terminated-exit-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-2 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 2, + }, + }, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "terminated-exit-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "ExitCode:2", rows[0].Cells[2]) + }, + }, + { + name: "Running pod with ready condition", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "ready-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-3 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + {Type: corev1.PodReady, Status: corev1.ConditionTrue}, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "ready-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + }, + }, + { + name: "Running pod without ready condition", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "not-ready-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-4 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + Ready: false, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "not-ready-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + }, + }, + { + name: "Running pod with container not ready", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "running-not-ready-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-55 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: 
"main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "main-container", + Ready: false, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "running-not-ready-pod", rows[0].Cells[0]) + assert.Equal(t, "0/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + }, + }, + { + name: "Completed pod with running container", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "completed-running-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-50 * time.Minute)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "main-container"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + Conditions: []corev1.PodCondition{ + {Type: corev1.PodReady, Status: corev1.ConditionTrue}, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "main-container", + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "completed-running-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + }, + }, + { + name: "Pod with multiple IPs", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "multi-ip-pod", + CreationTimestamp: metav1.Time{Time: time.Now().Add(-5 * time.Hour)}, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{Name: "container1"}}, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + PodIPs: []corev1.PodIP{ + {IP: "192.168.1.10"}, + {IP: "fd00::10"}, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "container1", + Ready: true, + State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}}, + }, + }, + }, + }, + options: printers.GenerateOptions{Wide: true}, + expectedChecks: func(t *testing.T, rows []metav1.TableRow) { + assert.Len(t, rows, 1) + assert.Equal(t, "multi-ip-pod", rows[0].Cells[0]) + assert.Equal(t, "1/1", rows[0].Cells[1]) + assert.Equal(t, "Running", rows[0].Cells[2]) + assert.Equal(t, "192.168.1.10", rows[0].Cells[5]) // IP column in wide output + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rows, err := printPod(tc.pod, tc.options) + assert.NoError(t, err) + tc.expectedChecks(t, rows) + assert.Equal(t, runtime.RawExtension{Object: tc.pod}, rows[0].Object) + }) + } +} + +func TestHasPodReadyCondition(t *testing.T) { + testCases := []struct { + name string + conditions []corev1.PodCondition + expected bool + }{ + { + name: "Empty conditions", + conditions: []corev1.PodCondition{}, + expected: false, + }, + { + name: "Ready condition is true", + conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + }, + expected: true, + }, + { + name: "Ready condition is false", + conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + }, + expected: false, + }, + { + name: "Ready condition is unknown", + conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionUnknown, + }, + }, + expected: false, + }, + { + name: "Multiple conditions, Ready is true", + conditions: 
[]corev1.PodCondition{ + { + Type: corev1.PodInitialized, + Status: corev1.ConditionTrue, + }, + { + Type: corev1.PodReady, + Status: corev1.ConditionTrue, + }, + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + expected: true, + }, + { + name: "Multiple conditions, Ready is false", + conditions: []corev1.PodCondition{ + { + Type: corev1.PodInitialized, + Status: corev1.ConditionTrue, + }, + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + }, + { + Type: corev1.ContainersReady, + Status: corev1.ConditionTrue, + }, + }, + expected: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + result := hasPodReadyCondition(tc.conditions) + assert.Equal(t, tc.expected, result, "hasPodReadyCondition returned unexpected result") + }) + } +} + func TestPrintCoreV1(t *testing.T) { testCases := []struct { pod corev1.Pod diff --git a/pkg/util/lifted/podtemplate_test.go b/pkg/util/lifted/podtemplate_test.go new file mode 100644 index 000000000000..4dd54442724e --- /dev/null +++ b/pkg/util/lifted/podtemplate_test.go @@ -0,0 +1,291 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +func TestGetPodsLabelSet(t *testing.T) { + tests := []struct { + name string + template *corev1.PodTemplateSpec + expected labels.Set + }{ + { + name: "Empty labels", + template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + }, + expected: labels.Set{}, + }, + { + name: "With labels", + template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test", + "env": "prod", + }, + }, + }, + expected: labels.Set{ + "app": "test", + "env": "prod", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getPodsLabelSet(tt.template) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetPodsFinalizers(t *testing.T) { + tests := []struct { + name string + template *corev1.PodTemplateSpec + expected []string + }{ + { + name: "No finalizers", + template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + }, + expected: []string{}, + }, + { + name: "With finalizers", + template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Finalizers: []string{"finalizer1", "finalizer2"}, + }, + }, + expected: []string{"finalizer1", "finalizer2"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := getPodsFinalizers(tt.template) + assert.Equal(t, tt.expected, result) + }) + } +} + +func TestGetPodsAnnotationSet(t *testing.T) { + tests := []struct { + name string + template *corev1.PodTemplateSpec + expected labels.Set + }{ + { + name: "Empty annotations", + template: &corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{}, + }, + expected: 
diff --git a/pkg/util/lifted/podtemplate_test.go b/pkg/util/lifted/podtemplate_test.go
new file mode 100644
index 000000000000..4dd54442724e
--- /dev/null
+++ b/pkg/util/lifted/podtemplate_test.go
@@ -0,0 +1,291 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lifted
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+func TestGetPodsLabelSet(t *testing.T) {
+	tests := []struct {
+		name     string
+		template *corev1.PodTemplateSpec
+		expected labels.Set
+	}{
+		{
+			name: "Empty labels",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{},
+			},
+			expected: labels.Set{},
+		},
+		{
+			name: "With labels",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"app": "test",
+						"env": "prod",
+					},
+				},
+			},
+			expected: labels.Set{
+				"app": "test",
+				"env": "prod",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getPodsLabelSet(tt.template)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestGetPodsFinalizers(t *testing.T) {
+	tests := []struct {
+		name     string
+		template *corev1.PodTemplateSpec
+		expected []string
+	}{
+		{
+			name: "No finalizers",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{},
+			},
+			expected: []string{},
+		},
+		{
+			name: "With finalizers",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{"finalizer1", "finalizer2"},
+				},
+			},
+			expected: []string{"finalizer1", "finalizer2"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getPodsFinalizers(tt.template)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestGetPodsAnnotationSet(t *testing.T) {
+	tests := []struct {
+		name     string
+		template *corev1.PodTemplateSpec
+		expected labels.Set
+	}{
+		{
+			name: "Empty annotations",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{},
+			},
+			expected: labels.Set{},
+		},
+		{
+			name: "With annotations",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Annotations: map[string]string{
+						"key1": "value1",
+						"key2": "value2",
+					},
+				},
+			},
+			expected: labels.Set{
+				"key1": "value1",
+				"key2": "value2",
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getPodsAnnotationSet(tt.template)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestGetPodsPrefix(t *testing.T) {
+	tests := []struct {
+		name           string
+		controllerName string
+		expected       string
+	}{
+		{
+			name:           "Short name",
+			controllerName: "test",
+			expected:       "test-",
+		},
+		{
+			name:           "Long name",
+			controllerName: "very-long-controller-name-that-exceeds-the-limit",
+			expected:       "very-long-controller-name-that-exceeds-the-limit-",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getPodsPrefix(tt.controllerName)
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestGetPodFromTemplate(t *testing.T) {
+	tests := []struct {
+		name           string
+		template       *corev1.PodTemplateSpec
+		parentObject   runtime.Object
+		controllerRef  *metav1.OwnerReference
+		expectedError  bool
+		expectedErrMsg string
+		validateResult func(*testing.T, *corev1.Pod)
+	}{
+		{
+			name: "Valid template",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels:      map[string]string{"app": "test"},
+					Annotations: map[string]string{"key": "value"},
+					Finalizers:  []string{"finalizer1"},
+				},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{Name: "test-container", Image: "test-image"},
+					},
+				},
+			},
+			parentObject: &corev1.ReplicationController{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "parent",
+					Namespace: "default",
+				},
+			},
+			controllerRef: &metav1.OwnerReference{
+				APIVersion: "v1",
+				Kind:       "ReplicationController",
+				Name:       "parent",
+				UID:        "test-uid",
+			},
+			expectedError: false,
+			validateResult: func(t *testing.T, pod *corev1.Pod) {
+				assert.Equal(t, "default", pod.Namespace)
+				assert.Equal(t, "parent-", pod.GenerateName)
+				assert.Equal(t, map[string]string{"app": "test"}, pod.Labels)
+				assert.Equal(t, map[string]string{"key": "value"}, pod.Annotations)
+				assert.Equal(t, []string{"finalizer1"}, pod.Finalizers)
+				assert.Len(t, pod.OwnerReferences, 1)
+				assert.Equal(t, "parent", pod.OwnerReferences[0].Name)
+				assert.Len(t, pod.Spec.Containers, 1)
+				assert.Equal(t, "test-container", pod.Spec.Containers[0].Name)
+			},
+		},
+		{
+			name: "Parent object without name",
+			template: &corev1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{},
+				Spec: corev1.PodSpec{
+					Containers: []corev1.Container{
+						{Name: "test-container", Image: "test-image"},
+					},
+				},
+			},
+			parentObject: &corev1.ReplicationController{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "default",
+				},
+			},
+			controllerRef: nil,
+			expectedError: false,
+			validateResult: func(t *testing.T, pod *corev1.Pod) {
+				assert.Equal(t, "default", pod.Namespace)
+				assert.Empty(t, pod.GenerateName)
+				assert.Empty(t, pod.Labels)
+				assert.Empty(t, pod.Annotations)
+				assert.Empty(t, pod.Finalizers)
+				assert.Empty(t, pod.OwnerReferences)
+				assert.Len(t, pod.Spec.Containers, 1)
+				assert.Equal(t, "test-container", pod.Spec.Containers[0].Name)
+			},
+		},
+		{
+			name:           "Parent object without ObjectMeta",
+			template:       &corev1.PodTemplateSpec{},
+			parentObject:   &struct{ runtime.Object }{},
+			controllerRef:  nil,
+			expectedError:  true,
+			expectedErrMsg: "parentObject does not have ObjectMeta",
+			validateResult: func(t *testing.T, pod *corev1.Pod) {
+				assert.Nil(t, pod)
+			},
+		},
+		{
+			name:     "Empty template",
+			template: &corev1.PodTemplateSpec{},
+			parentObject: &corev1.ReplicationController{
+				ObjectMeta: metav1.ObjectMeta{Name: "parent", Namespace: "default"},
+			},
+			controllerRef: nil,
+			expectedError: false,
+			validateResult: func(t *testing.T, pod *corev1.Pod) {
+				assert.NotNil(t, pod)
+				assert.Equal(t, "default", pod.Namespace)
+				assert.Equal(t, "parent-", pod.GenerateName)
+				assert.Empty(t, pod.Labels)
+				assert.Empty(t, pod.Annotations)
+				assert.Empty(t, pod.Finalizers)
+				assert.Empty(t, pod.OwnerReferences)
+				assert.Empty(t, pod.Spec.Containers)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := GetPodFromTemplate(tt.template, tt.parentObject, tt.controllerRef)
+
+			if tt.expectedError {
+				assert.Error(t, err)
+				assert.Nil(t, result)
+				if tt.expectedErrMsg != "" {
+					assert.Contains(t, err.Error(), tt.expectedErrMsg)
+				}
+			} else {
+				assert.NoError(t, err)
+				assert.NotNil(t, result)
+				if tt.validateResult != nil {
+					tt.validateResult(t, result)
+				}
+			}
+		})
+	}
+}
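Taken together, the assertions above describe the contract of GetPodFromTemplate: the namespace is inherited from the parent, GenerateName is derived from the parent name via getPodsPrefix, and an owner reference is appended only when controllerRef is non-nil. A minimal caller, assuming only what the tests assert:

	template := &corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "web"}},
	}
	owner := &corev1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
	}
	ref := metav1.NewControllerRef(owner, corev1.SchemeGroupVersion.WithKind("ReplicationController"))
	pod, err := GetPodFromTemplate(template, owner, ref)
	// On success: pod.Namespace == "default", pod.GenerateName == "web-",
	// and pod.OwnerReferences[0] == *ref.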
diff --git a/pkg/util/lifted/scheduler/cache/cache_test.go b/pkg/util/lifted/scheduler/cache/cache_test.go
new file mode 100644
index 000000000000..62c459e163c3
--- /dev/null
+++ b/pkg/util/lifted/scheduler/cache/cache_test.go
@@ -0,0 +1,1070 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/karmada-io/karmada/pkg/util/lifted/scheduler/framework"
+)
+
+// TestNode represents a test node configuration
+type TestNode struct {
+	name      string
+	labels    map[string]string
+	images    []corev1.ContainerImage
+	resources corev1.ResourceList
+}
+
+// TestPod represents a test pod configuration
+type TestPod struct {
+	name      string
+	namespace string
+	uid       string
+	nodeName  string
+	labels    map[string]string
+	volumes   []corev1.Volume
+	affinity  *corev1.Affinity
+}
+
+func TestCacheNode(t *testing.T) {
+	tests := []struct {
+		name    string
+		nodes   []TestNode
+		ops     func(*testing.T, Cache, []*corev1.Node)
+		verify  func(*testing.T, Cache)
+		wantErr bool
+	}{
+		{
+			name: "add single node",
+			nodes: []TestNode{
+				{
+					name: "node1",
+					resources: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("1"),
+					},
+				},
+			},
+			ops: func(t *testing.T, c Cache, nodes []*corev1.Node) {
+				nodeInfo := c.AddNode(nodes[0])
+				require.NotNil(t, nodeInfo)
+				assert.Equal(t, nodes[0].Name, nodeInfo.Node().Name)
+			},
+			verify: func(t *testing.T, c Cache) {
+				assert.Equal(t, 1, c.NodeCount())
+			},
+		},
+		{
+			name: "update node resources",
+			nodes: []TestNode{
+				{
+					name: "node1",
+					resources: corev1.ResourceList{
+						corev1.ResourceCPU: resource.MustParse("1"),
+					},
+				},
+			},
+			ops: func(t *testing.T, c Cache, nodes []*corev1.Node) {
+				c.AddNode(nodes[0])
+
+				// Update node with new resources
+				updatedNode := nodes[0].DeepCopy()
+				updatedNode.Status.Allocatable = corev1.ResourceList{
+					corev1.ResourceCPU: resource.MustParse("2"),
+				}
+
+				nodeInfo := c.UpdateNode(nodes[0], updatedNode)
+				require.NotNil(t, nodeInfo)
+				assert.Equal(t, "2", nodeInfo.Node().Status.Allocatable.Cpu().String())
+			},
+			verify: func(t *testing.T, c Cache) {
+				assert.Equal(t, 1, c.NodeCount())
+			},
+		},
+		{
+			name: "remove node",
+			nodes: []TestNode{
+				{name: "node1"},
+			},
+			ops: func(t *testing.T, c Cache, nodes []*corev1.Node) {
+				c.AddNode(nodes[0])
+				err := c.RemoveNode(nodes[0])
+				assert.NoError(t, err)
+			},
+			verify: func(t *testing.T, c Cache) {
+				assert.Equal(t, 0, c.NodeCount())
+			},
+		},
+		{
+			name: "remove non-existent node",
+			nodes: []TestNode{
+				{name: "node1"},
+			},
+			ops: func(t *testing.T, c Cache, nodes []*corev1.Node) {
+				err := c.RemoveNode(nodes[0])
+				assert.Error(t, err)
+			},
+			verify: func(t *testing.T, c Cache) {
+				assert.Equal(t, 0, c.NodeCount())
+			},
+			wantErr: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cache := newCache(time.Second, time.Second, nil)
+			nodes := make([]*corev1.Node, len(tt.nodes))
+			for i, n := range tt.nodes {
+				nodes[i] = newTestNode(n)
+			}
+
+			tt.ops(t, cache, nodes)
+			tt.verify(t, cache)
+		})
+	}
+}
+
+func TestCachePod(t *testing.T) {
+	tests := []struct {
+		name    string
+		node    TestNode
+		pods    []TestPod
+		ops     func(*testing.T, Cache, *corev1.Node, []*corev1.Pod)
+		verify  func(*testing.T, Cache)
+		wantErr bool
+	}{
+		{
+			name: "add pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 1, count)
+			},
+		},
+		{
+			name: "assume pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AssumePod(pods[0])
+				assert.NoError(t, err)
+
+				assumed, err := c.IsAssumedPod(pods[0])
+				assert.NoError(t, err)
+				assert.True(t, assumed)
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 1, count)
+			},
+		},
+		{
+			name: "forget assumed pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				_ = c.AssumePod(pods[0])
+				err := c.ForgetPod(pods[0])
+				assert.NoError(t, err)
+
+				assumed, err := c.IsAssumedPod(pods[0])
+				assert.NoError(t, err)
+				assert.False(t, assumed)
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 0, count)
+			},
+		},
+		{
+			name: "update pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				_ = c.AddPod(pods[0])
+
+				updatedPod := pods[0].DeepCopy()
+				updatedPod.Labels = map[string]string{"key": "value"}
+
+				err := c.UpdatePod(pods[0], updatedPod)
+				assert.NoError(t, err)
+
+				pod, err := c.GetPod(updatedPod)
+				assert.NoError(t, err)
+				assert.Equal(t, "value", pod.Labels["key"])
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 1, count)
+			},
+		},
+		{
+			name: "remove pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+
+				err = c.RemovePod(pods[0])
+				assert.NoError(t, err)
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 0, count)
+			},
+		},
+		{
+			name: "remove non-existent pod",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.RemovePod(pods[0])
+				assert.Error(t, err)
+				assert.Contains(t, err.Error(), "is not found in scheduler cache")
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 0, count)
+			},
+		},
+		{
+			name: "remove pod with empty node name",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			ops: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+
+				// Create a pod with same identity but empty node name
+				podWithEmptyNode := pods[0].DeepCopy()
+				podWithEmptyNode.Spec.NodeName = ""
+
+				err = c.RemovePod(podWithEmptyNode)
+				assert.NoError(t, err)
+			},
+			verify: func(t *testing.T, c Cache) {
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 0, count)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cache := newCache(time.Hour, time.Second, nil) // Using longer TTL to avoid expiration
+			node := newTestNode(tt.node)
+			pods := make([]*corev1.Pod, len(tt.pods))
+			for i, p := range tt.pods {
+				pods[i] = newTestPod(p)
+			}
+			tt.ops(t, cache, node, pods)
+			tt.verify(t, cache)
+		})
+	}
+}
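The assume/forget cases encode the cache's optimistic-binding protocol. Condensed into the order a scheduler would call it, using the same constructors as these tests (a sketch of intended usage, not part of the patch):

	c := newCache(30*time.Second, time.Second, nil)
	c.AddNode(node)

	_ = c.AssumePod(pod)     // charge the pod to the node before the API write
	_ = c.FinishBinding(pod) // binding issued; the expiration clock starts here

	// An informer-confirmed AddPod replaces the assumed entry; otherwise
	// ForgetPod (or TTL-based cleanup) rolls the optimistic accounting back.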
+func TestCacheSnapshot(t *testing.T) {
+	tests := []struct {
+		name           string
+		node           TestNode
+		pods           []TestPod
+		setup          func(*testing.T, Cache, *corev1.Node, []*corev1.Pod)
+		verifySnapshot func(*testing.T, *Snapshot)
+	}{
+		{
+			name: "basic snapshot",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				assert.Equal(t, 1, len(snapshot.nodeInfoMap))
+				assert.Equal(t, 1, len(snapshot.nodeInfoList))
+				assert.Greater(t, snapshot.generation, int64(0))
+			},
+		},
+		{
+			name: "snapshot with pod affinity",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+					affinity: &corev1.Affinity{
+						PodAffinity: &corev1.PodAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+								{
+									LabelSelector: &metav1.LabelSelector{
+										MatchExpressions: []metav1.LabelSelectorRequirement{
+											{
+												Key:      "app",
+												Operator: metav1.LabelSelectorOpIn,
+												Values:   []string{"web"},
+											},
+										},
+									},
+									TopologyKey: "kubernetes.io/hostname",
+								},
+							},
+						},
+					},
+				},
+			},
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				assert.Equal(t, 1, len(snapshot.havePodsWithAffinityNodeInfoList))
+			},
+		},
+		{
+			name: "snapshot with pod anti-affinity",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+					affinity: &corev1.Affinity{
+						PodAntiAffinity: &corev1.PodAntiAffinity{
+							RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+								{
+									LabelSelector: &metav1.LabelSelector{
+										MatchExpressions: []metav1.LabelSelectorRequirement{
+											{
+												Key:      "app",
+												Operator: metav1.LabelSelectorOpIn,
+												Values:   []string{"web"},
+											},
+										},
+									},
+									TopologyKey: "kubernetes.io/hostname",
+								},
+							},
+						},
+					},
+				},
+			},
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				assert.Equal(t, 1, len(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList))
+			},
+		},
+		{
+			name: "snapshot with PVC reference",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+					volumes: []corev1.Volume{
+						{
+							Name: "pvc-volume",
+							VolumeSource: corev1.VolumeSource{
+								PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+									ClaimName: "test-pvc",
+								},
+							},
+						},
+					},
+				},
+			},
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				assert.True(t, snapshot.usedPVCSet.Has("default/test-pvc"))
+			},
+		},
+		{
+			name: "snapshot with deleted node",
+			node: TestNode{name: "node1"},
+			pods: []TestPod{
+				{
+					name:      "pod1",
+					namespace: "default",
+					uid:       "pod1",
+					nodeName:  "node1",
+				},
+			},
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pods []*corev1.Pod) {
+				c.AddNode(node)
+				err := c.AddPod(pods[0])
+				assert.NoError(t, err)
+				snapshot1 := NewSnapshot(nil, nil)
+				err = c.UpdateSnapshot(snapshot1)
+				assert.NoError(t, err)
+
+				// Remove node and verify that removeDeletedNodesFromSnapshot is triggered
+				err = c.RemoveNode(node)
+				assert.NoError(t, err)
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				assert.Equal(t, 0, len(snapshot.nodeInfoMap))
+			},
+		},
+		{
+			name: "snapshot consistency check",
+			node: TestNode{name: "node1"},
+			setup: func(_ *testing.T, _ Cache, _ *corev1.Node, _ []*corev1.Pod) {
+				// Don't add the node, this will create an inconsistent state
+			},
+			verifySnapshot: func(t *testing.T, snapshot *Snapshot) {
+				// The snapshot should be empty but consistent
+				assert.Equal(t, 0, len(snapshot.nodeInfoMap))
+				assert.Equal(t, 0, len(snapshot.nodeInfoList))
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cache := newCache(time.Hour, time.Second, nil)
+			node := newTestNode(tt.node)
+			pods := make([]*corev1.Pod, len(tt.pods))
+			for i, p := range tt.pods {
+				pods[i] = newTestPod(p)
+			}
+
+			tt.setup(t, cache, node, pods)
+
+			snapshot := NewSnapshot(nil, nil)
+			err := cache.UpdateSnapshot(snapshot)
+			assert.NoError(t, err)
+
+			tt.verifySnapshot(t, snapshot)
+		})
+	}
+}
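The snapshot cases above all drive UpdateSnapshot against a long-lived Snapshot value. Assuming the lifted cache keeps upstream kube-scheduler's incremental design, only NodeInfos whose Generation advanced since the previous sync are re-cloned (a sketch of the intended call pattern):

	snapshot := NewEmptySnapshot()
	_ = c.UpdateSnapshot(snapshot) // first sync copies every NodeInfo
	// ... a scheduling cycle reads from the stable snapshot ...
	_ = c.UpdateSnapshot(snapshot) // later syncs clone only changed NodeInfos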
+func TestUpdateNodeInfoSnapshotList(t *testing.T) {
+	node1 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node1",
+		},
+	}
+	node2 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node2",
+		},
+	}
+
+	// Create a cache
+	cache := newCache(time.Hour, time.Second, nil)
+	cache.nodeTree.addNode(node1)
+	cache.nodeTree.addNode(node2)
+
+	// Create and populate nodeInfoMap in snapshot
+	snapshot := NewSnapshot(nil, nil)
+	snapshot.nodeInfoMap = make(map[string]*framework.NodeInfo)
+
+	// Create NodeInfos
+	info1 := framework.NewNodeInfo()
+	info2 := framework.NewNodeInfo()
+	info1.SetNode(node1)
+	info2.SetNode(node2)
+
+	// Add them to snapshot
+	snapshot.nodeInfoMap["node1"] = info1
+	snapshot.nodeInfoMap["node2"] = info2
+
+	cache.updateNodeInfoSnapshotList(snapshot, true)
+
+	assert.Equal(t, 2, len(snapshot.nodeInfoList), "should have both nodes in list")
+	assert.Equal(t, 0, len(snapshot.havePodsWithAffinityNodeInfoList), "should have no nodes with affinity")
+	assert.Equal(t, 0, len(snapshot.havePodsWithRequiredAntiAffinityNodeInfoList), "should have no nodes with anti-affinity")
+	assert.Empty(t, snapshot.usedPVCSet, "should have no PVC references")
+}
+
+func TestRemoveDeletedNodesFromSnapshot(t *testing.T) {
+	cache := newCache(time.Hour, time.Second, nil)
+
+	node1 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node1",
+		},
+	}
+	node2 := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "node2",
+		},
+	}
+
+	// Set up the cache nodeTree
+	cache.nodeTree.addNode(node1)
+	cache.nodeTree.addNode(node2)
+
+	// Create node infos in cache
+	nodeInfo1 := framework.NewNodeInfo()
+	nodeInfo2 := framework.NewNodeInfo()
+	nodeInfo1.SetNode(node1)
+	nodeInfo2.SetNode(node2)
+	cache.nodes["node1"] = newNodeInfoListItem(nodeInfo1)
+	cache.nodes["node2"] = newNodeInfoListItem(nodeInfo2)
+
+	// Create snapshot with both nodes
+	snapshot := NewSnapshot(nil, nil)
+	snapshot.nodeInfoMap = map[string]*framework.NodeInfo{
+		"node1": nodeInfo1.Clone(),
+		"node2": nodeInfo2.Clone(),
+	}
+
+	// Initial state check
+	assert.Equal(t, 2, len(snapshot.nodeInfoMap), "should start with two nodes in snapshot")
+
+	// Remove node1 from cache
+	delete(cache.nodes, "node1")
+	err := cache.nodeTree.removeNode(node1)
+	require.NoError(t, err)
+
+	// Run the function being tested
+	cache.removeDeletedNodesFromSnapshot(snapshot)
+
+	// Verify the results
+	assert.Equal(t, 1, len(snapshot.nodeInfoMap), "should have one node after removal")
+	_, exists := snapshot.nodeInfoMap["node2"]
+	assert.True(t, exists, "node2 should still exist in snapshot")
+	_, exists = snapshot.nodeInfoMap["node1"]
+	assert.False(t, exists, "node1 should be removed from snapshot")
+}
+
+func TestCacheImageStates(t *testing.T) {
+	tests := []struct {
+		name   string
+		node   TestNode
+		verify func(*testing.T, Cache)
+	}{
+		{
+			name: "add node with images",
+			node: TestNode{
+				name: "node1",
+				images: []corev1.ContainerImage{
+					{
+						Names:     []string{"test-image:latest"},
+						SizeBytes: 1000,
+					},
+				},
+			},
+			verify: func(t *testing.T, c Cache) {
+				nodeInfo := c.AddNode(newTestNode(TestNode{
+					name: "node1",
+					images: []corev1.ContainerImage{
+						{
+							Names:     []string{"test-image:latest"},
+							SizeBytes: 1000,
+						},
+					},
+				}))
+
+				require.NotNil(t, nodeInfo)
+				assert.Equal(t, 1, len(nodeInfo.ImageStates))
+
+				imageState, ok := nodeInfo.ImageStates["test-image:latest"]
+				assert.True(t, ok)
+				assert.Equal(t, int64(1000), imageState.Size)
+				assert.Equal(t, 1, imageState.NumNodes)
+			},
+		},
+		{
+			name: "remove node images",
+			node: TestNode{
+				name: "node1",
+				images: []corev1.ContainerImage{
+					{
+						Names:     []string{"test-image:latest"},
+						SizeBytes: 1000,
+					},
+				},
+			},
+			verify: func(t *testing.T, c Cache) {
+				node := newTestNode(TestNode{
+					name: "node1",
+					images: []corev1.ContainerImage{
+						{
+							Names:     []string{"test-image:latest"},
+							SizeBytes: 1000,
+						},
+					},
+				})
+
+				c.AddNode(node)
+
+				err := c.RemoveNode(node)
+				require.NoError(t, err, "failed to remove node")
+
+				// Verify imageStates is empty
+				impl := c.(*cacheImpl)
+				assert.Equal(t, 0, len(impl.imageStates))
+			},
+		},
+		{
+			name: "update node images",
+			node: TestNode{
+				name: "node1",
+				images: []corev1.ContainerImage{
+					{
+						Names:     []string{"test-image:v1"},
+						SizeBytes: 1000,
+					},
+				},
+			},
+			verify: func(t *testing.T, c Cache) {
+				oldNode := newTestNode(TestNode{
+					name: "node1",
+					images: []corev1.ContainerImage{
+						{
+							Names:     []string{"test-image:v1"},
+							SizeBytes: 1000,
+						},
+					},
+				})
+
+				newNode := oldNode.DeepCopy()
+				newNode.Status.Images = []corev1.ContainerImage{
+					{
+						Names:     []string{"test-image:v2"},
+						SizeBytes: 2000,
+					},
+				}
+
+				c.AddNode(oldNode)
+
+				nodeInfo := c.UpdateNode(oldNode, newNode)
+
+				// Verify new image state
+				require.NotNil(t, nodeInfo)
+				assert.Equal(t, 1, len(nodeInfo.ImageStates))
+
+				imageState, ok := nodeInfo.ImageStates["test-image:v2"]
+				assert.True(t, ok)
+				assert.Equal(t, int64(2000), imageState.Size)
+				assert.Equal(t, 1, imageState.NumNodes)
+
+				// Verify old image was removed
+				_, ok = nodeInfo.ImageStates["test-image:v1"]
+				assert.False(t, ok)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			cache := newCache(time.Second, time.Second, nil)
+			tt.verify(t, cache)
+		})
+	}
+}
+
+func TestCacheAssumedPodExpiration(t *testing.T) {
+	tests := []struct {
+		name     string
+		ttl      time.Duration
+		setup    func(*testing.T, Cache, *corev1.Node, *corev1.Pod)
+		validate func(*testing.T, Cache, *corev1.Pod)
+	}{
+		{
+			name: "assumed pod expires",
+			ttl:  time.Second,
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pod *corev1.Pod) {
+				c.AddNode(node)
+				err := c.AssumePod(pod)
+				require.NoError(t, err)
+
+				err = c.FinishBinding(pod)
+				require.NoError(t, err)
+
+				// Wait for TTL to expire and add extra time for cleanup
+				time.Sleep(2 * time.Second)
+
+				// Trigger cleanup explicitly
+				impl := c.(*cacheImpl)
+				now := time.Now()
+				impl.cleanupAssumedPods(now)
+			},
+			validate: func(t *testing.T, c Cache, pod *corev1.Pod) {
+				assumed, err := c.IsAssumedPod(pod)
+				assert.NoError(t, err)
+				assert.False(t, assumed, "pod should no longer be assumed after expiration")
+
+				_, err = c.GetPod(pod)
+				assert.Error(t, err, "pod should be removed from cache after expiration")
+			},
+		},
+		{
+			name: "assumed pod does not expire with zero TTL",
+			ttl:  0,
+			setup: func(t *testing.T, c Cache, node *corev1.Node, pod *corev1.Pod) {
+				c.AddNode(node)
+				err := c.AssumePod(pod)
+				require.NoError(t, err)
+
+				err = c.FinishBinding(pod)
+				require.NoError(t, err)
+
+				// Wait some time
+				time.Sleep(2 * time.Second)
+
+				// Trigger cleanup explicitly
+				impl := c.(*cacheImpl)
+				now := time.Now()
+				impl.cleanupAssumedPods(now)
+			},
+			validate: func(t *testing.T, c Cache, pod *corev1.Pod) {
+				assumed, err := c.IsAssumedPod(pod)
+				assert.NoError(t, err)
+				assert.True(t, assumed, "pod should remain assumed with zero TTL")
+
+				foundPod, err := c.GetPod(pod)
+				assert.NoError(t, err)
+				assert.Equal(t, pod.UID, foundPod.UID)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// Create stop channel to prevent background cleanup
+			stop := make(chan struct{})
+			defer close(stop)
+
+			cache := newCache(tt.ttl, time.Second, stop)
+			node := newTestNode(TestNode{name: "node1"})
+			pod := newTestPod(TestPod{
+				name:      "pod1",
+				namespace: "default",
+				uid:       "pod1",
+				nodeName:  "node1",
+			})
+
+			tt.setup(t, cache, node, pod)
+			tt.validate(t, cache, pod)
+		})
+	}
+}
+
+func TestCacheDump(t *testing.T) {
+	cache := newCache(time.Second, time.Second, nil)
+
+	node := &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "test-node",
+		},
+	}
+	cache.AddNode(node)
+
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: "default",
+			Name:      "test-pod",
+			UID:       types.UID("test-pod"),
+		},
+		Spec: corev1.PodSpec{
+			NodeName: "test-node",
+		},
+	}
+	_ = cache.AssumePod(pod)
+
+	dump := cache.Dump()
+	assert.Len(t, dump.Nodes, 1, "Expected 1 node in dump")
+	assert.Len(t, dump.AssumedPods, 1, "Expected 1 assumed pod in dump")
+}
+
+func TestNew(t *testing.T) {
+	tests := []struct {
+		name   string
+		ttl    time.Duration
+		verify func(*testing.T, Cache)
+	}{
+		{
+			name: "create cache with zero TTL",
+			ttl:  0,
+			verify: func(t *testing.T, c Cache) {
+				impl := c.(*cacheImpl)
+				assert.Equal(t, time.Duration(0), impl.ttl)
+				assert.Equal(t, cleanAssumedPeriod, impl.period)
+				assert.NotNil(t, impl.stop)
+			},
+		},
+		{
+			name: "create cache with non-zero TTL",
+			ttl:  5 * time.Minute,
+			verify: func(t *testing.T, c Cache) {
+				impl := c.(*cacheImpl)
+				assert.Equal(t, 5*time.Minute, impl.ttl)
+				assert.Equal(t, cleanAssumedPeriod, impl.period)
+				assert.NotNil(t, impl.stop)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stop := make(chan struct{})
+			defer close(stop)
+
+			cache := New(tt.ttl, stop)
+			tt.verify(t, cache)
+		})
+	}
+}
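TestNew above only checks field wiring; the periodic cleanup that TestCacheRun below drives by calling run() directly is presumably what New starts in the background. Roughly, under that assumption:

	stop := make(chan struct{})
	c := New(10*time.Minute, stop) // assumed to start the cleanup loop internally,
	// conceptually: go wait.Until(func() { cleanupAssumedPods(time.Now()) }, cleanAssumedPeriod, stop)
	close(stop) // terminates the cleanup goroutine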
+func TestCacheRun(t *testing.T) {
+	tests := []struct {
+		name     string
+		setup    func(*testing.T, *cacheImpl)
+		validate func(*testing.T, *cacheImpl)
+	}{
+		{
+			name: "cleanup of expired assumed pods",
+			setup: func(t *testing.T, c *cacheImpl) {
+				node := &corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node1",
+					},
+				}
+				c.AddNode(node)
+
+				pod := &corev1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-pod",
+						Namespace: "default",
+						UID:       "test-pod-uid",
+					},
+					Spec: corev1.PodSpec{
+						NodeName: "node1",
+					},
+				}
+
+				err := c.AssumePod(pod)
+				assert.NoError(t, err)
+
+				// Mark it as finished binding
+				err = c.FinishBinding(pod)
+				assert.NoError(t, err)
+
+				assumed, err := c.IsAssumedPod(pod)
+				assert.NoError(t, err)
+				assert.True(t, assumed)
+			},
+			validate: func(t *testing.T, c *cacheImpl) {
+				// Wait for cleanup to happen
+				time.Sleep(2 * time.Second)
+
+				// Count pods - should be 0 after cleanup
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 0, count)
+
+				// Verify assumedPods is empty
+				assert.Equal(t, 0, len(c.assumedPods))
+			},
+		},
+		{
+			name: "non-expired assumed pods remain",
+			setup: func(t *testing.T, c *cacheImpl) {
+				node := &corev1.Node{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "node1",
+					},
+				}
+				c.AddNode(node)
+
+				pod := &corev1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:      "test-pod",
+						Namespace: "default",
+						UID:       "test-pod-uid",
+					},
+					Spec: corev1.PodSpec{
+						NodeName: "node1",
+					},
+				}
+
+				err := c.AssumePod(pod)
+				assert.NoError(t, err)
+
+				// Mark it as finished binding but don't expire it
+				err = c.FinishBinding(pod)
+				assert.NoError(t, err)
+
+				// Set a future deadline
+				future := time.Now().Add(1 * time.Hour)
+				c.podStates[string(pod.UID)].deadline = &future
+			},
+			validate: func(t *testing.T, c *cacheImpl) {
+				// Wait for potential cleanup
+				time.Sleep(2 * time.Second)
+
+				// Count should still be 1
+				count, err := c.PodCount()
+				assert.NoError(t, err)
+				assert.Equal(t, 1, count)
+
+				// Should still be in assumedPods
+				assert.Equal(t, 1, len(c.assumedPods))
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stop := make(chan struct{})
+			cache := newCache(1*time.Second, 500*time.Millisecond, stop)
+
+			tt.setup(t, cache)
+
+			cache.run()
+
+			tt.validate(t, cache)
+
+			// Cleanup
+			close(stop)
+		})
+	}
+}
+
+// Helper Functions
+
+func newTestNode(config TestNode) *corev1.Node {
+	return &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:   config.name,
+			Labels: config.labels,
+		},
+		Status: corev1.NodeStatus{
+			Images:      config.images,
+			Allocatable: config.resources,
+		},
+	}
+}
+
+func newTestPod(config TestPod) *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      config.name,
+			Namespace: config.namespace,
+			UID:       types.UID(config.uid),
+			Labels:    config.labels,
+		},
+		Spec: corev1.PodSpec{
+			NodeName: config.nodeName,
+			Volumes:  config.volumes,
+			Affinity: config.affinity,
+		},
+	}
+}
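The next file covers Snapshot construction directly. Its core contract, expressed with the helpers defined at the bottom of that file (an illustrative sketch of intended usage):

	nodes := []*corev1.Node{makeNode("node1", makeResourceList(1000, 2000))}
	pods := []*corev1.Pod{makePodWithPVC("pod1", "node1", "pvc1")}

	s := NewSnapshot(pods, nodes)
	_ = s.NumNodes()                      // 1
	_ = s.IsPVCUsedByPods("default/pvc1") // true: the pod is bound to node1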
diff --git a/pkg/util/lifted/scheduler/cache/snapshot_test.go b/pkg/util/lifted/scheduler/cache/snapshot_test.go
new file mode 100644
index 000000000000..0cb5f1b3372e
--- /dev/null
+++ b/pkg/util/lifted/scheduler/cache/snapshot_test.go
@@ -0,0 +1,493 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func TestNewEmptySnapshot(t *testing.T) {
+	t.Run("verify empty snapshot initialization", func(t *testing.T) {
+		s := NewEmptySnapshot()
+		assert.NotNil(t, s)
+		assert.NotNil(t, s.nodeInfoMap)
+		assert.NotNil(t, s.usedPVCSet)
+		assert.Empty(t, s.nodeInfoList)
+		assert.Empty(t, s.havePodsWithAffinityNodeInfoList)
+		assert.Empty(t, s.havePodsWithRequiredAntiAffinityNodeInfoList)
+	})
+}
+
+func TestNewSnapshot(t *testing.T) {
+	tests := []struct {
+		name                      string
+		nodes                     []*corev1.Node
+		pods                      []*corev1.Pod
+		expectedNodeCount         int
+		expectedPVCCount          int
+		expectedAffinityCount     int
+		expectedAntiAffinityCount int
+		expectedImageCount        int
+	}{
+		{
+			name:                      "empty snapshot",
+			expectedNodeCount:         0,
+			expectedPVCCount:          0,
+			expectedAffinityCount:     0,
+			expectedAntiAffinityCount: 0,
+			expectedImageCount:        0,
+		},
+		{
+			name: "single node without pods",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			expectedNodeCount:         1,
+			expectedPVCCount:          0,
+			expectedAffinityCount:     0,
+			expectedAntiAffinityCount: 0,
+			expectedImageCount:        0,
+		},
+		{
+			name: "multiple nodes with various pod configurations",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+				makeNode("node2", makeResourceList(2000, 4000)),
+			},
+			pods: []*corev1.Pod{
+				makePod("pod1", "node1", true, false), // with affinity
+				makePod("pod2", "node2", false, true), // with anti-affinity
+				makePodWithPVC("pod3", "node1", "pvc1"),
+			},
+			expectedNodeCount:         2,
+			expectedPVCCount:          1,
+			expectedAffinityCount:     2,
+			expectedAntiAffinityCount: 1,
+			expectedImageCount:        0,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(tt.pods, tt.nodes)
+			assert.NotNil(t, s)
+			assert.Equal(t, tt.expectedNodeCount, len(s.nodeInfoMap))
+			assert.Equal(t, tt.expectedNodeCount, len(s.nodeInfoList))
+			assert.Equal(t, tt.expectedPVCCount, s.usedPVCSet.Len())
+
+			affinityList, err := s.HavePodsWithAffinityList()
+			assert.NoError(t, err)
+			assert.Equal(t, tt.expectedAffinityCount, len(affinityList))
+
+			antiAffinityList, err := s.HavePodsWithRequiredAntiAffinityList()
+			assert.NoError(t, err)
+			assert.Equal(t, tt.expectedAntiAffinityCount, len(antiAffinityList))
+
+			if tt.expectedImageCount > 0 {
+				nodeInfo, err := s.Get("node1")
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expectedImageCount, len(nodeInfo.ImageStates))
+			}
+		})
+	}
+}
+
+func TestNodeInfoOperations(t *testing.T) {
+	tests := []struct {
+		name       string
+		nodes      []*corev1.Node
+		pods       []*corev1.Pod
+		operations func(t *testing.T, s *Snapshot)
+	}{
+		{
+			name: "basic node operations",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			pods: []*corev1.Pod{
+				makePod("pod1", "node1", false, false),
+			},
+			operations: func(t *testing.T, s *Snapshot) {
+				nodeInfos := s.NodeInfos()
+				assert.NotNil(t, nodeInfos)
+
+				// Test List
+				list, err := nodeInfos.List()
+				assert.NoError(t, err)
+				assert.Equal(t, 1, len(list))
+
+				// Test Get existing node
+				info, err := nodeInfos.Get("node1")
+				assert.NoError(t, err)
+				assert.NotNil(t, info)
+				assert.Equal(t, "node1", info.Node().Name)
+
+				// Test Get non-existent node
+				info, err = nodeInfos.Get("node-non-existent")
+				assert.Error(t, err)
+				assert.Nil(t, info)
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(tt.pods, tt.nodes)
+			tt.operations(t, s)
+		})
+	}
+}
+
+func TestPVCOperations(t *testing.T) {
+	tests := []struct {
+		name   string
+		nodes  []*corev1.Node
+		pods   []*corev1.Pod
+		checks func(t *testing.T, s *Snapshot)
+	}{
+		{
+			name: "PVC tracking scenarios",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			pods: []*corev1.Pod{
+				makePodWithPVC("scheduled-pod", "node1", "pvc1"),
+				makePodWithPVC("unscheduled-pod", "", "pvc2"),
+				makePodWithoutPVC("pod-without-pvc", "node1"),
+			},
+			checks: func(t *testing.T, s *Snapshot) {
+				assert.True(t, s.IsPVCUsedByPods("default/pvc1"))
+				assert.False(t, s.IsPVCUsedByPods("default/pvc2"))
+				assert.False(t, s.IsPVCUsedByPods("default/non-existent"))
+				assert.Equal(t, 1, s.usedPVCSet.Len())
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(tt.pods, tt.nodes)
+			tt.checks(t, s)
+		})
+	}
+}
+
+func TestImageHandling(t *testing.T) {
+	tests := []struct {
+		name                string
+		nodes               []*corev1.Node
+		expectedImageStates map[string]struct {
+			size     int64
+			numNodes int
+		}
+	}{
+		{
+			name: "single node with multiple image tags",
+			nodes: []*corev1.Node{
+				makeNodeWithImages("node1", makeResourceList(1000, 2000), []corev1.ContainerImage{
+					{
+						Names:     []string{"image1:latest", "image1:v1"},
+						SizeBytes: 1000,
+					},
+				}),
+			},
+			expectedImageStates: map[string]struct {
+				size     int64
+				numNodes int
+			}{
+				"image1:latest": {1000, 1},
+				"image1:v1":     {1000, 1},
+			},
+		},
+		{
+			name: "multiple nodes with same image",
+			nodes: []*corev1.Node{
+				makeNodeWithImages("node1", makeResourceList(1000, 2000), []corev1.ContainerImage{
+					{
+						Names:     []string{"image1:latest"},
+						SizeBytes: 1000,
+					},
+				}),
+				makeNodeWithImages("node2", makeResourceList(1000, 2000), []corev1.ContainerImage{
+					{
+						Names:     []string{"image1:latest"},
+						SizeBytes: 1000,
+					},
+				}),
+			},
+			expectedImageStates: map[string]struct {
+				size     int64
+				numNodes int
+			}{
+				"image1:latest": {1000, 2},
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(nil, tt.nodes)
+			nodeInfo, err := s.Get(tt.nodes[0].Name)
+			assert.NoError(t, err)
+
+			for imageName, expected := range tt.expectedImageStates {
+				state := nodeInfo.ImageStates[imageName]
+				assert.Equal(t, expected.size, state.Size)
+				assert.Equal(t, expected.numNodes, state.NumNodes)
+			}
+		})
+	}
+}
+
+func TestStorageInfos(t *testing.T) {
+	tests := []struct {
+		name      string
+		nodes     []*corev1.Node
+		pods      []*corev1.Pod
+		pvcChecks map[string]bool // map of PVC keys to expected existence
+	}{
+		{
+			name: "empty snapshot",
+			pvcChecks: map[string]bool{
+				"default/non-existent-pvc": false,
+			},
+		},
+		{
+			name: "snapshot with PVCs",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			pods: []*corev1.Pod{
+				makePodWithPVC("pod1", "node1", "pvc1"),
+				makePodWithPVC("pod2", "node1", "pvc2"),
+				makePodWithPVC("pod3", "", "pvc3"), // Unscheduled pod
+			},
+			pvcChecks: map[string]bool{
+				"default/pvc1":             true,
+				"default/pvc2":             true,
+				"default/pvc3":             false, // false as pod is unscheduled
+				"default/non-existent-pvc": false,
+			},
+		},
+		{
+			name: "snapshot with mixed volume types",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			pods: []*corev1.Pod{
+				makePodWithPVC("pod1", "node1", "pvc1"),
+				makePodWithoutPVC("pod2", "node1"),
+				makePodWithPVC("pod3", "node1", "pvc3"),
+			},
+			pvcChecks: map[string]bool{
+				"default/pvc1": true,
+				"default/pvc3": true,
+			},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(tt.pods, tt.nodes)
+			storageInfos := s.StorageInfos()
+
+			assert.NotNil(t, storageInfos, "StorageInfos should not return nil")
+
+			// Check PVC existence
+			for pvcKey, shouldExist := range tt.pvcChecks {
+				exists := storageInfos.IsPVCUsedByPods(pvcKey)
+				assert.Equal(t, shouldExist, exists, "PVC %s existence mismatch", pvcKey)
+			}
+		})
+	}
+}
+
+func TestNumNodes(t *testing.T) {
+	tests := []struct {
+		name             string
+		nodes            []*corev1.Node
+		pods             []*corev1.Pod
+		expectedNumNodes int
+	}{
+		{
+			name:             "empty snapshot",
+			expectedNumNodes: 0,
+		},
+		{
+			name: "single node",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+			},
+			expectedNumNodes: 1,
+		},
+		{
+			name: "multiple nodes",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+				makeNode("node2", makeResourceList(2000, 4000)),
+				makeNode("node3", makeResourceList(3000, 6000)),
+			},
+			expectedNumNodes: 3,
+		},
+		{
+			name: "nodes with pods",
+			nodes: []*corev1.Node{
+				makeNode("node1", makeResourceList(1000, 2000)),
+				makeNode("node2", makeResourceList(2000, 4000)),
+			},
+			pods: []*corev1.Pod{
+				makePod("pod1", "node1", false, false),
+				makePod("pod2", "node2", false, false),
+			},
+			expectedNumNodes: 2,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			s := NewSnapshot(tt.pods, tt.nodes)
+
+			numNodes := s.NumNodes()
+			assert.Equal(t, tt.expectedNumNodes, numNodes, "NumNodes() returned incorrect count")
+
+			assert.Equal(t, tt.expectedNumNodes, len(s.nodeInfoList), "nodeInfoList length mismatch")
+
+			assert.Equal(t, tt.expectedNumNodes, len(s.nodeInfoMap), "nodeInfoMap length mismatch")
+
+			// Verify List method returns same count
+			list, err := s.List()
+			assert.NoError(t, err)
+			assert.Equal(t, tt.expectedNumNodes, len(list), "List() returned incorrect count")
+		})
+	}
+}
+
+// Helper functions
+
+func makeNode(name string, res corev1.ResourceList) *corev1.Node {
+	return &corev1.Node{
+		ObjectMeta: metav1.ObjectMeta{Name: name},
+		Status: corev1.NodeStatus{
+			Capacity:    res,
+			Allocatable: res,
+		},
+	}
+}
+
+func makeNodeWithImages(name string, res corev1.ResourceList, images []corev1.ContainerImage) *corev1.Node {
+	node := makeNode(name, res)
+	node.Status.Images = images
+	return node
+}
+
+func makeResourceList(cpu, memory int64) corev1.ResourceList {
+	return corev1.ResourceList{
+		corev1.ResourceCPU:    *resource.NewMilliQuantity(cpu, resource.DecimalSI),
+		corev1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+	}
+}
+
+func makePod(name, nodeName string, hasAffinity, hasAntiAffinity bool) *corev1.Pod {
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			NodeName: nodeName,
+		},
+	}
+
+	if hasAffinity || hasAntiAffinity {
+		pod.Spec.Affinity = &corev1.Affinity{}
+
+		if hasAffinity {
+			pod.Spec.Affinity.PodAffinity = &corev1.PodAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"key": "value"},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			}
+		}
+
+		if hasAntiAffinity {
+			pod.Spec.Affinity.PodAntiAffinity = &corev1.PodAntiAffinity{
+				RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+					{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"key": "value"},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			}
+		}
+	}
+
+	return pod
+}
+
+func makePodWithPVC(name, nodeName, pvcName string) *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			NodeName: nodeName,
+			Volumes: []corev1.Volume{
+				{
+					Name: "vol1",
+					VolumeSource: corev1.VolumeSource{
+						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
+							ClaimName: pvcName,
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func makePodWithoutPVC(name, nodeName string) *corev1.Pod {
+	return &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: "default",
+		},
+		Spec: corev1.PodSpec{
+			NodeName: nodeName,
+			Volumes: []corev1.Volume{
+				{
+					Name: "vol1",
+					VolumeSource: corev1.VolumeSource{
+						EmptyDir: &corev1.EmptyDirVolumeSource{},
+					},
+				},
+			},
+		},
+	}
+}
diff --git a/pkg/util/lifted/scheduler/framework/types_test.go b/pkg/util/lifted/scheduler/framework/types_test.go
new file mode 100644
index 000000000000..f01148a68606
--- /dev/null
+++ b/pkg/util/lifted/scheduler/framework/types_test.go
@@ -0,0 +1,1414 @@
+/*
+Copyright 2024 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
+
+	"github.com/karmada-io/karmada/pkg/util"
+)
+
+func TestClusterEventIsWildCard(t *testing.T) {
+	tests := []struct {
+		name     string
+		event    ClusterEvent
+		expected bool
+	}{
+		{
+			name:     "WildCard event",
+			event:    ClusterEvent{Resource: WildCard, ActionType: All},
+			expected: true,
+		},
+		{
+			name:     "Non-WildCard resource",
+			event:    ClusterEvent{Resource: Pod, ActionType: All},
+			expected: false,
+		},
+		{
+			name:     "Non-WildCard action",
+			event:    ClusterEvent{Resource: WildCard, ActionType: Add},
+			expected: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := tt.event.IsWildCard()
+			assert.Equal(t, tt.expected, result)
+		})
+	}
+}
+
+func TestQueuedPodInfoDeepCopy(t *testing.T) {
+	now := time.Now()
+	qpi := &QueuedPodInfo{
+		PodInfo: &PodInfo{
+			Pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "test-pod",
+				},
+			},
+		},
+		Timestamp:               now,
+		Attempts:                2,
+		InitialAttemptTimestamp: now.Add(-time.Hour),
+		UnschedulablePlugins:    sets.NewString("plugin1", "plugin2"),
+		Gated:                   true,
+	}
+	podInfoCopy := qpi.DeepCopy()
+	assert.Equal(t, qpi.PodInfo.Pod.Name, podInfoCopy.PodInfo.Pod.Name)
+	assert.Equal(t, qpi.Timestamp, podInfoCopy.Timestamp)
+	assert.Equal(t, qpi.Attempts, podInfoCopy.Attempts)
+	assert.Equal(t, qpi.InitialAttemptTimestamp, podInfoCopy.InitialAttemptTimestamp)
+	assert.Equal(t, qpi.UnschedulablePlugins, podInfoCopy.UnschedulablePlugins)
+	assert.Equal(t, qpi.Gated, podInfoCopy.Gated)
+}
+
+func TestPodInfoDeepCopy(t *testing.T) {
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-pod",
+			Namespace: "default",
+			UID:       types.UID("test-uid"),
+		},
+	}
+
+	podInfo, _ := NewPodInfo(pod)
+	copiedPodInfo := podInfo.DeepCopy()
+
+	assert.NotSame(t, podInfo, copiedPodInfo)
+	assert.Equal(t, podInfo.Pod.Name, copiedPodInfo.Pod.Name)
+	assert.Equal(t, podInfo.Pod.Namespace, copiedPodInfo.Pod.Namespace)
+	assert.Equal(t, podInfo.Pod.UID, copiedPodInfo.Pod.UID)
+}
+
+func TestPodInfoUpdate(t *testing.T) {
+	originalPod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-pod",
+			Namespace: "default",
+			UID:       types.UID("test-uid"),
+		},
+	}
+
+	updatedPod := originalPod.DeepCopy()
+	updatedPod.Spec.NodeName = "node-1"
+
+	podInfo, _ := NewPodInfo(originalPod)
+	err := podInfo.Update(updatedPod)
+
+	assert.NoError(t, err)
+	assert.Equal(t, updatedPod, podInfo.Pod)
+	assert.Equal(t, "node-1", podInfo.Pod.Spec.NodeName)
+}
+
+func TestAffinityTermMatches(t *testing.T) {
+	tests := []struct {
+		name            string
+		affinityTerm    AffinityTerm
+		pod             *corev1.Pod
+		nsLabels        labels.Set
+		expectedMatches bool
+	}{
+		{
+			name: "Matches namespace and labels",
+			affinityTerm: AffinityTerm{
+				Namespaces:        sets.NewString("test-ns"),
+				Selector:          labels.SelectorFromSet(labels.Set{"app": "web"}),
+				TopologyKey:       "kubernetes.io/hostname",
+				NamespaceSelector: labels.Nothing(),
+			},
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+					Labels:    map[string]string{"app": "web"},
+				},
+			},
+			nsLabels:        labels.Set{},
+			expectedMatches: true,
+		},
+		{
+			name: "Matches namespace selector",
+			affinityTerm: AffinityTerm{
+				Namespaces:        sets.NewString(),
+				NamespaceSelector: labels.SelectorFromSet(labels.Set{"env": "prod"}),
+				Selector:          labels.SelectorFromSet(labels.Set{"app": "db"}),
+				TopologyKey:       "kubernetes.io/hostname",
+			},
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "prod-ns",
+					Labels:    map[string]string{"app": "db"},
+				},
+			},
+			nsLabels:        labels.Set{"env": "prod"},
+			expectedMatches: true,
+		},
+		{
+			name: "Does not match namespace",
+			affinityTerm: AffinityTerm{
+				Namespaces:        sets.NewString("test-ns"),
+				Selector:          labels.SelectorFromSet(labels.Set{"app": "web"}),
+				TopologyKey:       "kubernetes.io/hostname",
+				NamespaceSelector: labels.Nothing(),
+			},
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "other-ns",
+					Labels:    map[string]string{"app": "web"},
+				},
+			},
+			nsLabels:        labels.Set{},
+			expectedMatches: false,
+		},
+		{
+			name: "Does not match labels",
+			affinityTerm: AffinityTerm{
+				Namespaces:        sets.NewString("test-ns"),
+				Selector:          labels.SelectorFromSet(labels.Set{"app": "web"}),
+				TopologyKey:       "kubernetes.io/hostname",
+				NamespaceSelector: labels.Nothing(),
+			},
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+					Labels:    map[string]string{"app": "db"},
+				},
+			},
+			nsLabels:        labels.Set{},
+			expectedMatches: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			matches := tt.affinityTerm.Matches(tt.pod, tt.nsLabels)
+			assert.Equal(t, tt.expectedMatches, matches, "Unexpected match result")
+		})
+	}
+}
+
+func TestGetAffinityTerms(t *testing.T) {
+	tests := []struct {
+		name           string
+		pod            *corev1.Pod
+		affinityTerms  []corev1.PodAffinityTerm
+		expectedTerms  int
+		expectedErrMsg string
+	}{
+		{
+			name: "Valid affinity terms",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+				},
+			},
+			affinityTerms: []corev1.PodAffinityTerm{
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"app": "web"},
+					},
+					TopologyKey: "kubernetes.io/hostname",
+				},
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"app": "db"},
+					},
+					TopologyKey: "kubernetes.io/zone",
+				},
+			},
+			expectedTerms:  2,
+			expectedErrMsg: "",
+		},
+		{
+			name: "Invalid label selector",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+				},
+			},
+			affinityTerms: []corev1.PodAffinityTerm{
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchExpressions: []metav1.LabelSelectorRequirement{
+							{
+								Key:      "app",
+								Operator: "InvalidOperator",
+								Values:   []string{"web"},
+							},
+						},
+					},
+					TopologyKey: "kubernetes.io/hostname",
+				},
+			},
+			expectedTerms:  0,
+			expectedErrMsg: "\"InvalidOperator\" is not a valid label selector operator",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			terms, err := getAffinityTerms(tt.pod, tt.affinityTerms)
+
+			if tt.expectedErrMsg != "" {
+				assert.Error(t, err)
+				assert.Contains(t, err.Error(), tt.expectedErrMsg)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expectedTerms, len(terms), "Unexpected number of affinity terms")
+			}
+		})
+	}
+}
+
+func TestGetWeightedAffinityTerms(t *testing.T) {
+	tests := []struct {
+		name           string
+		pod            *corev1.Pod
+		affinityTerms  []corev1.WeightedPodAffinityTerm
+		expectedTerms  int
+		expectedErrMsg string
+	}{
+		{
+			name: "Valid weighted affinity terms",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+				},
+			},
+			affinityTerms: []corev1.WeightedPodAffinityTerm{
+				{
+					Weight: 100,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"app": "web"},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+				{
+					Weight: 50,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchLabels: map[string]string{"app": "db"},
+						},
+						TopologyKey: "kubernetes.io/zone",
+					},
+				},
+			},
+			expectedTerms:  2,
+			expectedErrMsg: "",
+		},
+		{
+			name: "Invalid label selector",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "test-ns",
+				},
+			},
+			affinityTerms: []corev1.WeightedPodAffinityTerm{
+				{
+					Weight: 100,
+					PodAffinityTerm: corev1.PodAffinityTerm{
+						LabelSelector: &metav1.LabelSelector{
+							MatchExpressions: []metav1.LabelSelectorRequirement{
+								{
+									Key:      "app",
+									Operator: "InvalidOperator",
+									Values:   []string{"web"},
+								},
+							},
+						},
+						TopologyKey: "kubernetes.io/hostname",
+					},
+				},
+			},
+			expectedTerms:  0,
+			expectedErrMsg: "\"InvalidOperator\" is not a valid label selector operator",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			terms, err := getWeightedAffinityTerms(tt.pod, tt.affinityTerms)
+
+			if tt.expectedErrMsg != "" {
+				assert.Error(t, err)
+				assert.Contains(t, err.Error(), tt.expectedErrMsg)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expectedTerms, len(terms), "Unexpected number of weighted affinity terms")
+				for i, term := range terms {
+					assert.Equal(t, tt.affinityTerms[i].Weight, term.Weight, "Unexpected weight for term")
+				}
+			}
+		})
+	}
+}
+
+func TestNewPodInfo(t *testing.T) {
+	pod := &corev1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-pod",
+			Namespace: "default",
+			UID:       types.UID("test-uid"),
+		},
+		Spec: corev1.PodSpec{
+			Affinity: &corev1.Affinity{
+				PodAffinity: &corev1.PodAffinity{
+					RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+						{
+							LabelSelector: &metav1.LabelSelector{
+								MatchLabels: map[string]string{"app": "web"},
+							},
+							TopologyKey: "kubernetes.io/hostname",
+						},
+					},
+				},
+			},
+		},
+	}
+
+	podInfo, err := NewPodInfo(pod)
+
+	assert.NoError(t, err)
+	assert.NotNil(t, podInfo)
+	assert.Equal(t, pod, podInfo.Pod)
+	assert.Len(t, podInfo.RequiredAffinityTerms, 1)
+	assert.Equal(t, "kubernetes.io/hostname", podInfo.RequiredAffinityTerms[0].TopologyKey)
+}
+
+func TestGetPodAffinityTerms(t *testing.T) {
+	affinity := &corev1.Affinity{
+		PodAffinity: &corev1.PodAffinity{
+			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"app": "web"},
+					},
+					TopologyKey: "kubernetes.io/hostname",
+				},
+			},
+		},
+	}
+
+	terms := getPodAffinityTerms(affinity)
+
+	assert.Len(t, terms, 1)
+	assert.Equal(t, "kubernetes.io/hostname", terms[0].TopologyKey)
+}
+
+func TestGetPodAntiAffinityTerms(t *testing.T) {
+	affinity := &corev1.Affinity{
+		PodAntiAffinity: &corev1.PodAntiAffinity{
+			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: map[string]string{"app": "db"},
+					},
+					TopologyKey: "kubernetes.io/hostname",
+				},
+			},
+		},
+	}
+
+	terms := getPodAntiAffinityTerms(affinity)
+
+	assert.Len(t, terms, 1)
+	assert.Equal(t, "kubernetes.io/hostname", terms[0].TopologyKey)
+}
+
+// disable `deprecation` check as sets.String is deprecated
+//
+//nolint:staticcheck
+func TestGetNamespacesFromPodAffinityTerm(t *testing.T) {
+	tests := []struct {
+		name            string
+		pod             *corev1.Pod
+		podAffinityTerm *corev1.PodAffinityTerm
+		expectedNS      sets.String
+	}{
+		{
+			name: "No namespaces specified",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
+			},
+			podAffinityTerm: &corev1.PodAffinityTerm{},
+			expectedNS:      sets.String{"default": sets.Empty{}},
+		},
+		{
+			name: "Namespaces specified",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
+			},
+			podAffinityTerm: &corev1.PodAffinityTerm{
+				Namespaces: []string{"ns1", "ns2"},
+			},
+			expectedNS: sets.String{"ns1": sets.Empty{}, "ns2": sets.Empty{}},
+		},
+		{
+			name: "Namespace selector specified",
+			pod: &corev1.Pod{
+				ObjectMeta: metav1.ObjectMeta{Namespace: "default"},
+			},
+			podAffinityTerm: &corev1.PodAffinityTerm{
+				NamespaceSelector: &metav1.LabelSelector{},
+			},
+			expectedNS: sets.String{},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result := getNamespacesFromPodAffinityTerm(tt.pod, tt.podAffinityTerm)
+			assert.Equal(t, tt.expectedNS, result)
+		})
+	}
+}
+
+func TestNewNodeInfo(t *testing.T) {
+	pods := []*corev1.Pod{
+		{
+			Spec: corev1.PodSpec{
+				Containers: []corev1.Container{
+					{
+						Resources: corev1.ResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceCPU:    resource.MustParse("100m"),
+								corev1.ResourceMemory: resource.MustParse("200Mi"),
+							},
+						},
+					},
+				},
+			},
+		},
+		{
+			Spec: corev1.PodSpec{
+				Containers: []corev1.Container{
+					{
+						Resources: corev1.ResourceRequirements{
+							Requests: corev1.ResourceList{
+								corev1.ResourceCPU:    resource.MustParse("200m"),
+								corev1.ResourceMemory: resource.MustParse("300Mi"),
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	ni := NewNodeInfo(pods...)
+
+	assert.Equal(t, 2, len(ni.Pods), "Expected 2 pods in NodeInfo")
+	assert.Equal(t, int64(300), ni.Requested.MilliCPU, "Unexpected MilliCPU value")
+	assert.Equal(t, int64(500*1024*1024), ni.Requested.Memory, "Unexpected Memory value")
+	assert.NotNil(t, ni.UsedPorts, "UsedPorts should be initialized")
+	assert.NotNil(t, ni.ImageStates, "ImageStates should be initialized")
+	assert.NotNil(t, ni.PVCRefCounts, "PVCRefCounts should be initialized")
+}
modifying the clone doesn't affect the original + clonedNI.Requested.MilliCPU = 200 + assert.NotEqual(t, originalNI.Requested.MilliCPU, clonedNI.Requested.MilliCPU, "Modifying clone should not affect original") +} + +func TestNodeInfoString(t *testing.T) { + nodeInfo := &NodeInfo{ + Requested: &util.Resource{ + MilliCPU: 1000, + Memory: 2048, + }, + NonZeroRequested: &util.Resource{ + MilliCPU: 1000, + Memory: 2048, + }, + Allocatable: &util.Resource{ + MilliCPU: 2000, + Memory: 4096, + }, + UsedPorts: HostPortInfo{ + "0.0.0.0": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + } + + nodeInfoString := nodeInfo.String() + + assert.Contains(t, nodeInfoString, "RequestedResource:&util.Resource{MilliCPU:1000, Memory:2048") + assert.Contains(t, nodeInfoString, "NonZeroRequest: &util.Resource{MilliCPU:1000, Memory:2048") + assert.Contains(t, nodeInfoString, "UsedPort: framework.HostPortInfo{\"0.0.0.0\":map[framework.ProtocolPort]struct {}{framework.ProtocolPort{Protocol:\"TCP\", Port:80}:struct {}{}}}") + assert.Contains(t, nodeInfoString, "AllocatableResource:&util.Resource{MilliCPU:2000, Memory:4096") +} + +func TestNodeInfoAddPodInfo(t *testing.T) { + ni := NewNodeInfo() + pod := &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + } + podInfo, _ := NewPodInfo(pod) + + ni.AddPodInfo(podInfo) + + assert.Equal(t, 1, len(ni.Pods), "Expected 1 pod in NodeInfo") + assert.Equal(t, int64(100), ni.Requested.MilliCPU, "Unexpected MilliCPU value") + assert.Equal(t, int64(200*1024*1024), ni.Requested.Memory, "Unexpected Memory value") +} + +func TestPodWithAffinity(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + expectedResult bool + }{ + { + name: "Pod with affinity", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Affinity: &corev1.Affinity{ + PodAffinity: &corev1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "web"}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + expectedResult: true, + }, + { + name: "Pod with anti-affinity", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Affinity: &corev1.Affinity{ + PodAntiAffinity: &corev1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "db"}, + }, + TopologyKey: "kubernetes.io/hostname", + }, + }, + }, + }, + }, + }, + expectedResult: true, + }, + { + name: "Pod without affinity", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{}, + }, + expectedResult: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := podWithAffinity(tt.pod) + assert.Equal(t, tt.expectedResult, result, "Unexpected result for podWithAffinity") + }) + } +} + +func TestRemoveFromSlice(t *testing.T) { + pods := []*PodInfo{ + {Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{UID: "pod1"}}}, + {Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{UID: "pod2"}}}, + {Pod: &corev1.Pod{ObjectMeta: metav1.ObjectMeta{UID: "pod3"}}}, + } + + result := removeFromSlice(pods, "pod2") + + assert.Len(t, result, 2) + assert.Equal(t, "pod1", string(result[0].Pod.UID)) + assert.Equal(t, "pod3", string(result[1].Pod.UID)) +} + +func 
TestCalculateResource(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + expectedRes util.Resource + expectedNon0CPU int64 + expectedNon0Mem int64 + }{ + { + name: "Pod with single container", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + expectedRes: util.Resource{ + MilliCPU: 100, + Memory: 200 * 1024 * 1024, + }, + expectedNon0CPU: 100, + expectedNon0Mem: 200 * 1024 * 1024, + }, + { + name: "Pod with multiple containers", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }, + }, + }, + }, + }, + expectedRes: util.Resource{ + MilliCPU: 300, + Memory: 500 * 1024 * 1024, + }, + expectedNon0CPU: 300, + expectedNon0Mem: 500 * 1024 * 1024, + }, + { + name: "Pod with init container", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + }, + expectedRes: util.Resource{ + MilliCPU: 500, + Memory: 1024 * 1024 * 1024, + }, + expectedNon0CPU: 500, + expectedNon0Mem: 1024 * 1024 * 1024, + }, + { + name: "Pod with overhead", + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + Overhead: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("50m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + expectedRes: util.Resource{ + MilliCPU: 150, + Memory: 300 * 1024 * 1024, + }, + expectedNon0CPU: 150, + expectedNon0Mem: 300 * 1024 * 1024, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, non0CPU, non0Mem := calculateResource(tt.pod) + assert.Equal(t, tt.expectedRes.MilliCPU, res.MilliCPU, "Unexpected MilliCPU value") + assert.Equal(t, tt.expectedRes.Memory, res.Memory, "Unexpected Memory value") + assert.Equal(t, tt.expectedNon0CPU, non0CPU, "Unexpected non-zero CPU value") + assert.Equal(t, tt.expectedNon0Mem, non0Mem, "Unexpected non-zero Memory value") + }) + } +} + +func TestNodeInfoUpdateUsedPorts(t *testing.T) { + tests := []struct { + name string + nodeInfo *NodeInfo + pod *corev1.Pod + add bool + expected HostPortInfo + }{ + { + name: "Add ports", + nodeInfo: &NodeInfo{ + UsedPorts: make(HostPortInfo), + }, + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostIP: "192.168.1.1", Protocol: "TCP", HostPort: 
80}, + {HostIP: "192.168.1.1", Protocol: "UDP", HostPort: 53}, + }, + }, + }, + }, + }, + add: true, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: struct{}{}, + ProtocolPort{Protocol: "UDP", Port: 53}: struct{}{}, + }, + }, + }, + { + name: "Remove ports", + nodeInfo: &NodeInfo{ + UsedPorts: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: struct{}{}, + ProtocolPort{Protocol: "UDP", Port: 53}: struct{}{}, + }, + }, + }, + pod: &corev1.Pod{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Ports: []corev1.ContainerPort{ + {HostIP: "192.168.1.1", Protocol: "TCP", HostPort: 80}, + }, + }, + }, + }, + }, + add: false, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "UDP", Port: 53}: struct{}{}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.nodeInfo.updateUsedPorts(tt.pod, tt.add) + assert.Equal(t, tt.expected, tt.nodeInfo.UsedPorts, "Unexpected UsedPorts") + }) + } +} + +func TestNodeInfoUpdatePVCRefCounts(t *testing.T) { + ni := &NodeInfo{ + PVCRefCounts: make(map[string]int), + } + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + }, + Spec: corev1.PodSpec{ + Volumes: []corev1.Volume{ + { + Name: "vol1", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc1", + }, + }, + }, + { + Name: "vol2", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc2", + }, + }, + }, + }, + }, + } + + // Test adding PVC references + ni.updatePVCRefCounts(pod, true) + assert.Equal(t, 1, ni.PVCRefCounts["default/pvc1"]) + assert.Equal(t, 1, ni.PVCRefCounts["default/pvc2"]) + + // Test removing PVC references + ni.updatePVCRefCounts(pod, false) + assert.NotContains(t, ni.PVCRefCounts, "default/pvc1") + assert.NotContains(t, ni.PVCRefCounts, "default/pvc2") +} + +func TestNodeInfoSetNode(t *testing.T) { + ni := &NodeInfo{} + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + Status: corev1.NodeStatus{ + Allocatable: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + } + + ni.SetNode(node) + + assert.Equal(t, node, ni.node) + assert.Equal(t, int64(2000), ni.Allocatable.MilliCPU) + assert.Equal(t, int64(4*1024*1024*1024), ni.Allocatable.Memory) + assert.NotZero(t, ni.Generation) +} + +func TestNodeInfoRemoveNode(t *testing.T) { + node := &corev1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + }, + } + ni := &NodeInfo{ + node: node, + } + + oldGeneration := ni.Generation + ni.RemoveNode() + + assert.Nil(t, ni.node) + assert.NotEqual(t, oldGeneration, ni.Generation) +} + +func TestGetPodKey(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + expectedKey string + expectError bool + }{ + { + name: "Valid pod with UID", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: "123456789", + }, + }, + expectedKey: "123456789", + expectError: false, + }, + { + name: "Pod without UID", + pod: &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{}, + }, + expectedKey: "", + expectError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + key, err := GetPodKey(tt.pod) + + if tt.expectError { + assert.Error(t, err, "Expected an error for pod without UID") + } else { + assert.NoError(t, err, "Did not expect an error for valid pod") + 
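// GetPodKey is expected to return the pod's UID verbatim as its key. +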
assert.Equal(t, tt.expectedKey, key, "Unexpected pod key") + } + }) + } +} + +func TestGetNamespacedName(t *testing.T) { + tests := []struct { + testName string + namespace string + resourceName string + expected string + }{ + { + testName: "Valid namespace and name", + namespace: "default", + resourceName: "my-pod", + expected: "default/my-pod", + }, + { + testName: "Empty namespace", + namespace: "", + resourceName: "my-pod", + expected: "/my-pod", + }, + { + testName: "Empty name", + namespace: "default", + resourceName: "", + expected: "default/", + }, + } + + for _, tt := range tests { + t.Run(tt.testName, func(t *testing.T) { + result := GetNamespacedName(tt.namespace, tt.resourceName) + assert.Equal(t, tt.expected, result, "Unexpected namespaced name") + }) + } +} + +func TestNodeInfoRemovePod(t *testing.T) { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + UID: types.UID("test-pod-uid"), + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + }, + }, + }, + } + + ni := NewNodeInfo() + ni.AddPod(pod) + + // Verify that the pod was added correctly + assert.Equal(t, 1, len(ni.Pods), "Expected 1 pod in NodeInfo before removal") + assert.Equal(t, int64(100), ni.Requested.MilliCPU, "Unexpected MilliCPU value before removal") + assert.Equal(t, int64(200*1024*1024), ni.Requested.Memory, "Unexpected Memory value before removal") + + err := ni.RemovePod(pod) + + assert.NoError(t, err, "RemovePod should not return an error") + assert.Equal(t, 0, len(ni.Pods), "Expected 0 pods in NodeInfo after removal") + assert.Equal(t, int64(0), ni.Requested.MilliCPU, "MilliCPU should be 0 after pod removal") + assert.Equal(t, int64(0), ni.Requested.Memory, "Memory should be 0 after pod removal") +} + +func TestNewProtocolPort(t *testing.T) { + tests := []struct { + name string + protocol string + port int32 + expected ProtocolPort + }{ + { + name: "TCP protocol", + protocol: "TCP", + port: 80, + expected: ProtocolPort{Protocol: "TCP", Port: 80}, + }, + { + name: "UDP protocol", + protocol: "UDP", + port: 53, + expected: ProtocolPort{Protocol: "UDP", Port: 53}, + }, + { + name: "Empty protocol defaults to TCP", + protocol: "", + port: 8080, + expected: ProtocolPort{Protocol: "TCP", Port: 8080}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := NewProtocolPort(tt.protocol, tt.port) + assert.Equal(t, tt.expected, *result, "NewProtocolPort(%s, %d) returned unexpected result", tt.protocol, tt.port) + }) + } +} + +func TestHostPortInfoAdd(t *testing.T) { + tests := []struct { + name string + initial HostPortInfo + ip string + protocol string + port int32 + expected HostPortInfo + }{ + { + name: "Add new IP and port", + initial: HostPortInfo{}, + ip: "192.168.1.1", + protocol: "TCP", + port: 80, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + }, + { + name: "Add to existing IP", + initial: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + ip: "192.168.1.1", + protocol: "UDP", + port: 53, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + ProtocolPort{Protocol: "UDP", Port: 53}: {}, + }, + }, + }, + { + name: "Add with empty protocol", + initial: HostPortInfo{}, + ip: "10.0.0.1", + protocol: "", + port: 8080, + expected: 
HostPortInfo{ + "10.0.0.1": { + ProtocolPort{Protocol: "TCP", Port: 8080}: {}, + }, + }, + }, + { + name: "Add with empty IP", + initial: HostPortInfo{}, + ip: "", + protocol: "TCP", + port: 443, + expected: HostPortInfo{ + "0.0.0.0": { + ProtocolPort{Protocol: "TCP", Port: 443}: {}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := tt.initial + h.Add(tt.ip, tt.protocol, tt.port) + + assert.Equal(t, tt.expected, h, "HostPortInfo.Add() resulted in unexpected state") + }) + } +} + +func TestHostPortInfoRemove(t *testing.T) { + tests := []struct { + name string + initial HostPortInfo + ip string + protocol string + port int32 + expected HostPortInfo + }{ + { + name: "Remove existing entry", + initial: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + ProtocolPort{Protocol: "UDP", Port: 53}: {}, + }, + }, + ip: "192.168.1.1", + protocol: "TCP", + port: 80, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "UDP", Port: 53}: {}, + }, + }, + }, + { + name: "Remove last entry for IP", + initial: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + ip: "192.168.1.1", + protocol: "TCP", + port: 80, + expected: HostPortInfo{}, + }, + { + name: "Remove non-existent entry", + initial: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + ip: "192.168.1.1", + protocol: "UDP", + port: 53, + expected: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := tt.initial + h.Remove(tt.ip, tt.protocol, tt.port) + + assert.Equal(t, tt.expected, h, "HostPortInfo.Remove() resulted in unexpected state") + }) + } +} + +func TestHostPortInfoLen(t *testing.T) { + tests := []struct { + name string + info HostPortInfo + expected int + }{ + { + name: "Empty HostPortInfo", + info: HostPortInfo{}, + expected: 0, + }, + { + name: "Single IP, single port", + info: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + }, + }, + expected: 1, + }, + { + name: "Single IP, multiple ports", + info: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + ProtocolPort{Protocol: "UDP", Port: 53}: {}, + ProtocolPort{Protocol: "TCP", Port: 443}: {}, + }, + }, + expected: 3, + }, + { + name: "Multiple IPs, multiple ports", + info: HostPortInfo{ + "192.168.1.1": { + ProtocolPort{Protocol: "TCP", Port: 80}: {}, + ProtocolPort{Protocol: "UDP", Port: 53}: {}, + }, + "10.0.0.1": { + ProtocolPort{Protocol: "TCP", Port: 8080}: {}, + }, + }, + expected: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.info.Len() + assert.Equal(t, tt.expected, result, "HostPortInfo.Len() returned unexpected result") + }) + } +} + +func TestHostPortInfoCheckConflict(t *testing.T) { + tests := []struct { + name string + hpi HostPortInfo + ip string + protocol string + port int32 + expected bool + }{ + { + name: "No conflict", + hpi: HostPortInfo{ + "192.168.1.1": {ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + }, + ip: "192.168.1.1", + protocol: "TCP", + port: 8080, + expected: false, + }, + { + name: "Conflict with same IP", + hpi: HostPortInfo{ + "192.168.1.1": {ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + }, + ip: "192.168.1.1", + protocol: "TCP", + port: 80, + expected: true, + }, + { + name: "Conflict with 0.0.0.0", + hpi: HostPortInfo{ + "0.0.0.0": 
{ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + }, + ip: "192.168.1.1", + protocol: "TCP", + port: 80, + expected: true, + }, + { + name: "No conflict with different protocol", + hpi: HostPortInfo{ + "192.168.1.1": {ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + }, + ip: "192.168.1.1", + protocol: "UDP", + port: 80, + expected: false, + }, + { + name: "Input IP is 0.0.0.0, conflict with any IP", + hpi: HostPortInfo{ + "192.168.1.1": {ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + "10.0.0.1": {ProtocolPort{Protocol: "UDP", Port: 53}: {}}, + }, + ip: "0.0.0.0", + protocol: "TCP", + port: 80, + expected: true, + }, + { + name: "Input IP is 0.0.0.0, no conflict", + hpi: HostPortInfo{ + "192.168.1.1": {ProtocolPort{Protocol: "TCP", Port: 80}: {}}, + "10.0.0.1": {ProtocolPort{Protocol: "UDP", Port: 53}: {}}, + }, + ip: "0.0.0.0", + protocol: "TCP", + port: 8080, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := tt.hpi.CheckConflict(tt.ip, tt.protocol, tt.port) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/pkg/util/lifted/validateclustertaints_test.go b/pkg/util/lifted/validateclustertaints_test.go new file mode 100644 index 000000000000..cc39f05ee481 --- /dev/null +++ b/pkg/util/lifted/validateclustertaints_test.go @@ -0,0 +1,157 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package lifted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestValidateClusterTaintEffect(t *testing.T) { + tests := []struct { + name string + effect corev1.TaintEffect + allowEmpty bool + wantErrors int + errorMsg string + }{ + { + name: "valid NoSchedule effect", + effect: corev1.TaintEffectNoSchedule, + allowEmpty: false, + wantErrors: 0, + }, + { + name: "valid NoExecute effect", + effect: corev1.TaintEffectNoExecute, + allowEmpty: false, + wantErrors: 0, + }, + { + name: "invalid effect", + effect: corev1.TaintEffect("InvalidEffect"), + allowEmpty: false, + wantErrors: 1, + errorMsg: "test: Unsupported value: \"InvalidEffect\": supported values: \"NoSchedule\", \"NoExecute\"", + }, + { + name: "empty effect not allowed", + effect: corev1.TaintEffect(""), + allowEmpty: false, + wantErrors: 1, + errorMsg: "test: Required value", + }, + { + name: "empty effect with allowEmpty true", + effect: corev1.TaintEffect(""), + allowEmpty: true, + wantErrors: 1, + errorMsg: "test: Unsupported value: \"\": supported values: \"NoSchedule\", \"NoExecute\"", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateClusterTaintEffect(&tt.effect, tt.allowEmpty, field.NewPath("test")) + assert.Len(t, errors, tt.wantErrors) + + if tt.wantErrors == 0 { + assert.Empty(t, errors) + } else { + assert.NotEmpty(t, errors) + assert.Equal(t, tt.errorMsg, errors[0].Error()) + } + }) + } +} +func TestValidateClusterTaints(t *testing.T) { + tests := []struct { + name string + taints []corev1.Taint + wantErrors int + }{ + { + name: "valid taints", + taints: []corev1.Taint{ + {Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}, + {Key: "key2", Value: "value2", Effect: corev1.TaintEffectNoExecute}, + }, + wantErrors: 0, + }, + { + name: "invalid taint key", + taints: []corev1.Taint{ + {Key: "invalid key", Value: "value1", Effect: corev1.TaintEffectNoSchedule}, + }, + wantErrors: 1, + }, + { + name: "invalid taint value", + taints: []corev1.Taint{ + {Key: "key1", Value: "invalid value!", Effect: corev1.TaintEffectNoSchedule}, + }, + wantErrors: 1, + }, + { + name: "invalid taint effect", + taints: []corev1.Taint{ + {Key: "key1", Value: "value1", Effect: corev1.TaintEffect("InvalidEffect")}, + }, + wantErrors: 1, + }, + { + name: "duplicate taints", + taints: []corev1.Taint{ + {Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}, + {Key: "key1", Value: "value2", Effect: corev1.TaintEffectNoSchedule}, + }, + wantErrors: 1, + }, + { + name: "multiple errors", + taints: []corev1.Taint{ + {Key: "invalid key", Value: "invalid value!", Effect: corev1.TaintEffect("InvalidEffect")}, + {Key: "key1", Value: "value1", Effect: corev1.TaintEffectNoSchedule}, + {Key: "key1", Value: "value2", Effect: corev1.TaintEffectNoSchedule}, + }, + wantErrors: 4, + }, + { + name: "empty effect", + taints: []corev1.Taint{ + {Key: "key1", Value: "value1", Effect: corev1.TaintEffect("")}, + }, + wantErrors: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateClusterTaints(tt.taints, field.NewPath("test")) + assert.Len(t, errors, tt.wantErrors) + + if tt.wantErrors == 0 { + assert.Empty(t, errors) + } else { + assert.NotEmpty(t, errors) + } + }) + } +} diff --git a/pkg/util/lifted/validatingfhpa_test.go b/pkg/util/lifted/validatingfhpa_test.go new file mode 100644 index 000000000000..4233adec0090 --- /dev/null 
+++ b/pkg/util/lifted/validatingfhpa_test.go @@ -0,0 +1,1320 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/utils/ptr" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +func TestValidateFederatedHPA(t *testing.T) { + tests := []struct { + name string + fhpa *autoscalingv1alpha1.FederatedHPA + wantErr bool + }{ + { + name: "valid FederatedHPA", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid name", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid/name", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + }, + wantErr: true, + }, + { + name: "invalid spec", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + Namespace: "default", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + MinReplicas: ptr.To[int32](0), + MaxReplicas: 0, + }, + }, + wantErr: true, + }, + { + name: "missing namespace", + fhpa: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-fhpa", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateFederatedHPA(tt.fhpa) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateFederatedHPASpec(t *testing.T) { + tests := []struct { + name string + spec *autoscalingv1alpha1.FederatedHPASpec + wantErr bool + }{ + { + name: "valid spec", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: 
"Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid minReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](0), + MaxReplicas: 10, + }, + wantErr: true, + }, + { + name: "maxReplicas less than minReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](5), + MaxReplicas: 3, + }, + wantErr: true, + }, + { + name: "invalid scaleTargetRef", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + }, + wantErr: true, + }, + { + name: "invalid metrics", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + // Missing Resource field to trigger a validation error + }, + }, + }, + wantErr: true, + }, + { + name: "invalid behavior", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), // Invalid: negative value + }, + }, + }, + wantErr: true, + }, + { + name: "maxReplicas less than 1", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 0, + }, + wantErr: true, + }, + { + name: "minReplicas equals maxReplicas", + spec: &autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "test-deployment", + }, + MinReplicas: ptr.To[int32](5), + MaxReplicas: 5, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateFederatedHPASpec(tt.spec, field.NewPath("spec"), 1) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + // Check for specific errors + if tt.name == "invalid minReplicas" { + assert.Contains(t, errors.ToAggregate().Error(), "minReplicas", "Expected error related to minReplicas") + } + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateCrossVersionObjectReference(t *testing.T) { + tests := []struct { + name string + ref autoscalingv2.CrossVersionObjectReference + wantErr bool + }{ + { + name: "valid reference", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "my-deployment", + }, + 
wantErr: false, + }, + { + name: "missing kind", + ref: autoscalingv2.CrossVersionObjectReference{ + Name: "my-deployment", + }, + wantErr: true, + }, + { + name: "missing name", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + }, + wantErr: true, + }, + { + name: "invalid kind", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Invalid/Kind", + Name: "my-deployment", + }, + wantErr: true, + }, + { + name: "invalid name", + ref: autoscalingv2.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "my/deployment", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateCrossVersionObjectReference(tt.ref, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateFederatedHPAStatus(t *testing.T) { + tests := []struct { + name string + status *autoscalingv2.HorizontalPodAutoscalerStatus + wantErr bool + }{ + { + name: "valid status", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 3, + DesiredReplicas: 5, + }, + wantErr: false, + }, + { + name: "negative current replicas", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: -1, + DesiredReplicas: 5, + }, + wantErr: true, + }, + { + name: "negative desired replicas", + status: &autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 3, + DesiredReplicas: -1, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateFederatedHPAStatus(tt.status) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateBehavior(t *testing.T) { + tests := []struct { + name string + behavior *autoscalingv2.HorizontalPodAutoscalerBehavior + wantErr bool + }{ + { + name: "valid behavior", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](60), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + }, + wantErr: false, + }, + { + name: "invalid scale up stabilization window", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), + }, + }, + wantErr: true, + }, + { + name: "invalid scale down policy", + behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleDown: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: -1, + PeriodSeconds: 15, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "nil behavior", + behavior: nil, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateBehavior(tt.behavior, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, 
"Expected no validation errors, but got: %v", errors) + } + }) + } +} +func TestValidateScalingRules(t *testing.T) { + validPolicy := autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + } + + tests := []struct { + name string + rules *autoscalingv2.HPAScalingRules + wantErr bool + }{ + { + name: "nil rules", + rules: nil, + wantErr: false, + }, + { + name: "valid rules with Max select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "valid rules with Min select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MinChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "valid rules with Disabled select policy", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.DisabledPolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: false, + }, + { + name: "negative stabilization window", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](-1), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "stabilization window exceeding max", + rules: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](MaxStabilizationWindowSeconds + 1), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "invalid select policy", + rules: &autoscalingv2.HPAScalingRules{ + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect]("InvalidPolicy"), + Policies: []autoscalingv2.HPAScalingPolicy{validPolicy}, + }, + wantErr: true, + }, + { + name: "no policies", + rules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{}, + }, + wantErr: true, + }, + { + name: "invalid policy", + rules: &autoscalingv2.HPAScalingRules{ + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: "InvalidType", + Value: 0, + PeriodSeconds: 0, + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateScalingRules(tt.rules, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateScalingPolicy(t *testing.T) { + tests := []struct { + name string + policy autoscalingv2.HPAScalingPolicy + wantErr bool + }{ + { + name: "valid pods scaling policy", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: 15, + }, + wantErr: false, + }, + { + name: "valid percent scaling policy", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PercentScalingPolicy, + Value: 10, + PeriodSeconds: 15, + }, + wantErr: false, + }, + { + name: "invalid policy type", + policy: autoscalingv2.HPAScalingPolicy{ + Type: "InvalidType", + Value: 1, + PeriodSeconds: 15, + }, + wantErr: true, + }, + { + name: "zero value", + policy: autoscalingv2.HPAScalingPolicy{ + Type: 
autoscalingv2.PodsScalingPolicy, + Value: 0, + PeriodSeconds: 15, + }, + wantErr: true, + }, + { + name: "negative value", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: -1, + PeriodSeconds: 15, + }, + wantErr: true, + }, + { + name: "zero period seconds", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: 0, + }, + wantErr: true, + }, + { + name: "negative period seconds", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: -1, + }, + wantErr: true, + }, + { + name: "period seconds exceeding max", + policy: autoscalingv2.HPAScalingPolicy{ + Type: autoscalingv2.PodsScalingPolicy, + Value: 1, + PeriodSeconds: MaxPeriodSeconds + 1, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateScalingPolicy(tt.policy, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricSpec(t *testing.T) { + tests := []struct { + name string + spec autoscalingv2.MetricSpec + wantErr bool + }{ + { + name: "valid resource metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: false, + }, + { + name: "empty metric type", + spec: autoscalingv2.MetricSpec{}, + wantErr: true, + }, + { + name: "invalid metric type", + spec: autoscalingv2.MetricSpec{ + Type: "InvalidType", + }, + wantErr: true, + }, + { + name: "object metric without object", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + }, + wantErr: true, + }, + { + name: "pods metric without pods", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + }, + wantErr: true, + }, + { + name: "resource metric without resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + }, + wantErr: true, + }, + { + name: "container resource metric without container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + }, + wantErr: true, + }, + { + name: "multiple metric sources", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + Pods: &autoscalingv2.PodsMetricSource{}, + }, + wantErr: true, + }, { + name: "valid object metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + }, + wantErr: false, + }, + { + name: "valid container resource metric", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ContainerResourceMetricSourceType, + ContainerResource: 
&autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: false, + }, + { + name: "multiple metric sources - object and container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: true, + }, + { + name: "multiple metric sources - all types", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ObjectMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "memory", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](60), + }, + }, + }, + wantErr: true, + }, + { + name: "mismatched type and source - object", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.PodsMetricSourceType, + Object: &autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + }, + wantErr: true, + }, + { + name: "mismatched type and source - container resource", + spec: autoscalingv2.MetricSpec{ + Type: autoscalingv2.ResourceMetricSourceType, + ContainerResource: &autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricSpec(tt.spec, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateObjectSource(t *testing.T) { + tests 
:= []struct { + name string + src autoscalingv2.ObjectMetricSource + wantErr bool + }{ + { + name: "valid object metric", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: false, + }, + { + name: "missing described object", + src: autoscalingv2.ObjectMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: true, + }, + { + name: "missing metric name", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + }, + wantErr: true, + }, + { + name: "missing target value and average value", + src: autoscalingv2.ObjectMetricSource{ + DescribedObject: autoscalingv2.CrossVersionObjectReference{ + Kind: "Service", + Name: "my-service", + }, + Metric: autoscalingv2.MetricIdentifier{ + Name: "requests-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateObjectSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidatePodsSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.PodsMetricSource + wantErr bool + }{ + { + name: "valid pods metric", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: false, + }, + { + name: "valid pods metric with selector", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "web"}, + }, + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: false, + }, + { + name: "missing metric name", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{}, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: true, + }, + { + name: "missing average value", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + }, + }, + wantErr: true, + }, + { + name: "invalid target type", + src: autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "packets-per-second", + }, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + Value: 
ptr.To(resource.MustParse("1k")), + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validatePodsSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateContainerResourceSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.ContainerResourceMetricSource + wantErr bool + }{ + { + name: "valid container resource metric", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: false, + }, + { + name: "missing resource name", + src: autoscalingv2.ContainerResourceMetricSource{ + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "missing container name", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "both average utilization and average value set", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + AverageValue: ptr.To(resource.MustParse("100m")), + }, + }, + wantErr: true, + }, + { + name: "neither average utilization nor average value set", + src: autoscalingv2.ContainerResourceMetricSource{ + Name: "cpu", + Container: "app", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + }, + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateContainerResourceSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateResourceSource(t *testing.T) { + tests := []struct { + name string + src autoscalingv2.ResourceMetricSource + wantErr bool + }{ + { + name: "valid utilization", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: false, + }, + { + name: "valid average value", + src: autoscalingv2.ResourceMetricSource{ + Name: "memory", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("100Mi")), + }, + }, + wantErr: false, + }, + { + name: "empty resource name", + src: autoscalingv2.ResourceMetricSource{ + Name: "", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + }, + }, + wantErr: true, + }, + { + name: "missing target", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + }, + wantErr: true, + }, + { + name: "both utilization and value set", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: 
autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](50), + AverageValue: ptr.To(resource.MustParse("100m")), + }, + }, + wantErr: true, + }, + { + name: "neither utilization nor value set", + src: autoscalingv2.ResourceMetricSource{ + Name: "cpu", + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + }, + }, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateResourceSource(&tt.src, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricTarget(t *testing.T) { + tests := []struct { + name string + target autoscalingv2.MetricTarget + wantErr bool + }{ + { + name: "valid utilization target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](80), + }, + wantErr: false, + }, + { + name: "valid value target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("100")), + }, + wantErr: false, + }, + { + name: "valid average value target", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("50")), + }, + wantErr: false, + }, + { + name: "missing type", + target: autoscalingv2.MetricTarget{ + AverageUtilization: ptr.To[int32](80), + }, + wantErr: true, + }, + { + name: "invalid type", + target: autoscalingv2.MetricTarget{ + Type: "InvalidType", + AverageUtilization: ptr.To[int32](80), + }, + wantErr: true, + }, + { + name: "negative utilization", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](-1), + }, + wantErr: true, + }, + { + name: "negative value", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.ValueMetricType, + Value: ptr.To(resource.MustParse("-100")), + }, + wantErr: true, + }, + { + name: "negative average value", + target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.AverageValueMetricType, + AverageValue: ptr.To(resource.MustParse("-50")), + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricTarget(tt.target, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} + +func TestValidateMetricIdentifier(t *testing.T) { + tests := []struct { + name string + id autoscalingv2.MetricIdentifier + wantErr bool + }{ + { + name: "valid identifier", + id: autoscalingv2.MetricIdentifier{ + Name: "my-metric", + }, + wantErr: false, + }, + { + name: "empty name", + id: autoscalingv2.MetricIdentifier{ + Name: "", + }, + wantErr: true, + }, + { + name: "invalid name", + id: autoscalingv2.MetricIdentifier{ + Name: "my/metric", + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := validateMetricIdentifier(tt.id, field.NewPath("test")) + if tt.wantErr { + assert.NotEmpty(t, errors, "Expected validation errors, but got none") + } else { + assert.Empty(t, errors, "Expected no validation errors, but got: %v", errors) + } + }) + } +} diff --git a/pkg/util/lifted/validatingmcs_test.go b/pkg/util/lifted/validatingmcs_test.go new file 
mode 100644 index 000000000000..385d1829a7f9 --- /dev/null +++ b/pkg/util/lifted/validatingmcs_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package lifted + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +func TestValidateLoadBalancerStatus(t *testing.T) { + tests := []struct { + name string + status *corev1.LoadBalancerStatus + wantErrors int + }{ + { + name: "valid IP", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {IP: "192.168.1.1"}, + }, + }, + wantErrors: 0, + }, + { + name: "valid hostname", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {Hostname: "example.com"}, + }, + }, + wantErrors: 0, + }, + { + name: "invalid IP", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {IP: "300.300.300.300"}, + }, + }, + wantErrors: 1, + }, + { + name: "invalid hostname", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {Hostname: "invalid_hostname"}, + }, + }, + wantErrors: 1, + }, + { + name: "IP in hostname field", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {Hostname: "192.168.1.1"}, + }, + }, + wantErrors: 1, + }, + { + name: "multiple valid entries", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {IP: "192.168.1.1"}, + {Hostname: "example.com"}, + }, + }, + wantErrors: 0, + }, + { + name: "multiple entries with errors", + status: &corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {IP: "300.300.300.300"}, + {Hostname: "invalid_hostname"}, + {Hostname: "192.168.1.1"}, + }, + }, + wantErrors: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errors := ValidateLoadBalancerStatus(tt.status, field.NewPath("status")) + assert.Len(t, errors, tt.wantErrors) + + if tt.wantErrors == 0 { + assert.Empty(t, errors) + } else { + assert.NotEmpty(t, errors) + } + }) + } +} diff --git a/pkg/util/membercluster_client_test.go b/pkg/util/membercluster_client_test.go index 52e807faa3ed..e286a359d26a 100644 --- a/pkg/util/membercluster_client_test.go +++ b/pkg/util/membercluster_client_test.go @@ -21,12 +21,14 @@ import ( "io" "net/http" "net/http/httptest" - "reflect" "testing" + "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/rest" + controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -56,6 +58,218 @@ grw/ZQTTIVjjh4JBSW3WyWgNo/ikC1lrVxzl4iPUGptxT36Cr7Zk2Bsg0XqwbOvK WkBKOclmOV2xlTVuPw== -----END CERTIFICATE-----`) +func TestNewClusterScaleClientSet(t *testing.T) { + type args struct { + clusterName string + client client.Client + } + tests := []struct { + name string + 
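// args bundles the inputs passed to NewClusterClientSetForAgent under test. +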
args args + wantErr bool + errMsg string + }{ + { + name: "cluster not found", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(), + }, + wantErr: true, + errMsg: "clusters.cluster.karmada.io \"test\" not found", + }, + { + name: "APIEndpoint is empty", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithObjects(newCluster("test")).Build(), + }, + wantErr: true, + errMsg: "the api endpoint of cluster test is empty", + }, + { + name: "SecretRef is empty", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()). + WithObjects(withAPIEndPoint(newCluster("test"), "https://127.0.0.1")).Build(), + }, + wantErr: true, + errMsg: "cluster test does not have a secret", + }, + { + name: "Secret not found", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects( + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: clusterv1alpha1.ClusterSpec{ + APIEndpoint: "https://127.0.0.1", + SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "default", Name: "secret1"}, + }, + }).Build(), + }, + wantErr: true, + errMsg: "secrets \"secret1\" not found", + }, + { + name: "token not found", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects( + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: clusterv1alpha1.ClusterSpec{ + APIEndpoint: "https://127.0.0.1", + SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"}, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "secret1"}, + }).Build(), + }, + wantErr: true, + errMsg: "the secret for cluster test is missing a non-empty value for \"token\"", + }, + { + name: "valid configuration", + args: args{ + clusterName: "test", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects( + &clusterv1alpha1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: clusterv1alpha1.ClusterSpec{ + APIEndpoint: "https://127.0.0.1", + SecretRef: &clusterv1alpha1.LocalSecretReference{Namespace: "ns1", Name: "secret1"}, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns1", Name: "secret1"}, + Data: map[string][]byte{clusterv1alpha1.SecretTokenKey: []byte("token"), clusterv1alpha1.SecretCADataKey: testCA}, + }).Build(), + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewClusterScaleClientSet(tt.args.clusterName, tt.args.client) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + assert.Contains(t, err.Error(), tt.errMsg) + } else { + assert.NoError(t, err) + assert.NotNil(t, got) + assert.Equal(t, tt.args.clusterName, got.ClusterName) + assert.NotNil(t, got.KubeClient) + assert.NotNil(t, got.ScaleClient) + } + }) + } +} + +func TestNewClusterClientSetForAgent(t *testing.T) { + type args struct { + clusterName string + client client.Client + clientOption *ClientOption + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "valid configuration", + args: args{ + clusterName: "test-agent", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(), + clientOption: &ClientOption{QPS: 100, Burst: 200}, + }, + wantErr: false, + }, + } + + // Store the original GetConfig 
function + originalGetConfig := controllerruntime.GetConfig + // Defer its restoration + defer func() { controllerruntime.GetConfig = originalGetConfig }() + + // Mock the GetConfig function + controllerruntime.GetConfig = func() (*rest.Config, error) { + return &rest.Config{ + Host: "https://fake.example.com", + }, nil + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewClusterClientSetForAgent(tt.args.clusterName, tt.args.client, tt.args.clientOption) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + } else { + assert.NoError(t, err) + assert.NotNil(t, got) + assert.Equal(t, tt.args.clusterName, got.ClusterName) + assert.NotNil(t, got.KubeClient) + } + }) + } +} + +func TestNewClusterDynamicClientSetForAgent(t *testing.T) { + type args struct { + clusterName string + client client.Client + } + tests := []struct { + name string + args args + wantErr bool + }{ + { + name: "valid configuration", + args: args{ + clusterName: "test-agent-dynamic", + client: fakeclient.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(), + }, + wantErr: false, + }, + } + + // Store the original GetConfig function + originalGetConfig := controllerruntime.GetConfig + // Defer its restoration + defer func() { controllerruntime.GetConfig = originalGetConfig }() + + // Mock the GetConfig function + controllerruntime.GetConfig = func() (*rest.Config, error) { + return &rest.Config{ + Host: "https://fake.example.com", + }, nil + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := NewClusterDynamicClientSetForAgent(tt.args.clusterName, tt.args.client) + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + } else { + assert.NoError(t, err) + assert.NotNil(t, got) + assert.Equal(t, tt.args.clusterName, got.ClusterName) + assert.NotNil(t, got.DynamicClientSet) + } + }) + } +} + func TestNewClusterClientSet(t *testing.T) { type args struct { clusterName string @@ -217,25 +431,14 @@ func TestNewClusterClientSet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewClusterClientSet(tt.args.clusterName, tt.args.client, tt.args.clientOption) - if (err != nil) != tt.wantErr { - t.Errorf("NewClusterClientSet() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err != nil { - return - } - - if got == nil { - t.Error("NewClusterClientSet() got nil") - return - } - if got.ClusterName != tt.args.clusterName { - t.Errorf("NewClusterClientSet() got.ClusterName = %v, want %v", got.ClusterName, tt.args.clusterName) - return - } - if got.KubeClient == nil { - t.Error("NewClusterClientSet() got.KubeClient got nil") - return + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + } else { + assert.NoError(t, err) + assert.NotNil(t, got) + assert.Equal(t, tt.args.clusterName, got.ClusterName) + assert.NotNil(t, got.KubeClient) } }) } @@ -270,24 +473,19 @@ func TestNewClusterClientSet_ClientWorks(t *testing.T) { }).Build() clusterClient, err := NewClusterClientSet(clusterName, hostClient, nil) - if err != nil { - t.Error(err) - return - } + assert.NoError(t, err) + assert.NotNil(t, clusterClient) + got, err := clusterClient.KubeClient.CoreV1().Nodes().Get(context.TODO(), "foo", metav1.GetOptions{}) - if err != nil { - t.Error(err) - return - } + assert.NoError(t, err) + assert.NotNil(t, got) want := &corev1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", }, } - if !reflect.DeepEqual(got, want) { - t.Errorf("got = %#v, want %#v", got, want) - } + assert.Equal(t, want, got) } func 
TestNewClusterDynamicClientSet(t *testing.T) { @@ -441,25 +639,14 @@ func TestNewClusterDynamicClientSet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewClusterDynamicClientSet(tt.args.clusterName, tt.args.client) - if (err != nil) != tt.wantErr { - t.Errorf("NewClusterClientSet() error = %v, wantErr %v", err, tt.wantErr) - return - } - if err != nil { - return - } - - if got == nil { - t.Error("NewClusterClientSet() got nil") - return - } - if got.ClusterName != tt.args.clusterName { - t.Errorf("NewClusterClientSet() got ClusterName = %v, want %v", got.ClusterName, tt.args.clusterName) - return - } - if got.DynamicClientSet == nil { - t.Error("NewClusterClientSet() got DynamicClientSet nil") - return + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, got) + } else { + assert.NoError(t, err) + assert.NotNil(t, got) + assert.Equal(t, tt.args.clusterName, got.ClusterName) + assert.NotNil(t, got.DynamicClientSet) } }) } @@ -494,23 +681,17 @@ func TestNewClusterDynamicClientSet_ClientWorks(t *testing.T) { }).Build() clusterClient, err := NewClusterDynamicClientSet(clusterName, hostClient) - if err != nil { - t.Error(err) - return - } + assert.NoError(t, err) + assert.NotNil(t, clusterClient) nodeGVR := corev1.SchemeGroupVersion.WithResource("nodes") got, err := clusterClient.DynamicClientSet.Resource(nodeGVR).Get(context.TODO(), "foo", metav1.GetOptions{}) - if err != nil { - t.Error(err) - return - } + assert.NoError(t, err) + assert.NotNil(t, got) want := &unstructured.Unstructured{} want.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Node")) want.SetName("foo") - if !reflect.DeepEqual(got, want) { - t.Errorf("got = %#v, want %#v", got, want) - } + assert.Equal(t, want, got) } diff --git a/pkg/util/overridemanager/commandargsoverride_test.go b/pkg/util/overridemanager/commandargsoverride_test.go index bda649cc6f3d..10ec80ce44e5 100644 --- a/pkg/util/overridemanager/commandargsoverride_test.go +++ b/pkg/util/overridemanager/commandargsoverride_test.go @@ -260,13 +260,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"&& echo 'hello karmada'"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v", "-t", "&& echo 'hello karmada'"}, }, @@ -278,13 +278,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v"}, }, @@ -296,13 +296,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v", "-t"}, }, @@ 
-314,13 +314,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYamlWithTwoContainer(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"echo 'hello karmada'"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v", "-t", "echo 'hello karmada'"}, }, @@ -332,13 +332,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYamlWithTwoContainer(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v"}, }, @@ -350,13 +350,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandPodYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"echo 'hello karmada'"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/containers/0/command", Value: []string{"nginx", "-v", "-t", "echo 'hello karmada'"}, }, @@ -368,13 +368,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandStatefulSetYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"echo 'hello karmada'"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v", "-t", "echo 'hello karmada'"}, }, @@ -386,13 +386,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandReplicaSetYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v"}, }, @@ -404,13 +404,13 @@ func TestParseJSONPatchesByCommandOverrider(t *testing.T) { rawObj: generateTestCommandDaemonSetYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/command", Value: []string{"nginx", "-v"}, }, @@ -449,13 +449,13 @@ func TestParseJSONPatchesByArgsOverrider(t *testing.T) { rawObj: generateTestArgsDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"&& echo 'hello karmada'"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/args", Value: []string{"nginx", "-v", "-t", "&& echo 'hello 
karmada'"}, }, @@ -467,13 +467,13 @@ func TestParseJSONPatchesByArgsOverrider(t *testing.T) { rawObj: generateTestArgsDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/args", Value: []string{"nginx", "-v"}, }, @@ -485,13 +485,13 @@ func TestParseJSONPatchesByArgsOverrider(t *testing.T) { rawObj: generateTestCommandDeploymentYaml(), CommandArgsOverrider: &policyv1alpha1.CommandArgsOverrider{ ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"-t"}, }, }, want: []overrideOption{ { - Op: "add", + Op: string(policyv1alpha1.OverriderOpAdd), Path: "/spec/template/spec/containers/0/args", Value: []string{"-t"}, }, diff --git a/pkg/util/overridemanager/imageoverride_test.go b/pkg/util/overridemanager/imageoverride_test.go index b8204bf65979..41af9697b76f 100644 --- a/pkg/util/overridemanager/imageoverride_test.go +++ b/pkg/util/overridemanager/imageoverride_test.go @@ -240,13 +240,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateJobYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Registry", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: "registry.k8s.io", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "registry.k8s.io/perl:5.34.0", }, @@ -262,13 +262,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: "registry.k8s.io", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "registry.k8s.io/perl:5.34.0", }, @@ -281,13 +281,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Registry", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: ".test", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example.test/imagename:v1.0.0", }, @@ -300,13 +300,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.us/imagename:v1.0.0", }, @@ -319,13 +319,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Registry", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: "fictional.registry.us", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "imagename:v1.0.0", }, @@ -338,13 +338,13 @@ func 
TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: "/nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/imagename/nginx:v1.0.0", }, @@ -357,13 +357,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -376,13 +376,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/:v1.0.0", }, @@ -395,13 +395,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Tag", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/imagename:v1.0.0", // only one of tag and digest is valid. 
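// (the image already carries tag v1.0.0, so the digest add leaves the expected value unchanged)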
}, @@ -414,13 +414,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Tag", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/imagename@sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, @@ -433,13 +433,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Tag", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: "sha256:dbcc1c35ac38df41fd2f5e4130b32ffdb93ebae8b3dbe638c23575912276fc9c", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/imagename", }, @@ -452,13 +452,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generatePodYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -471,13 +471,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateStatefulSetYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -490,13 +490,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateReplicaSetYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -509,13 +509,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDaemonSetYaml(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -528,18 +528,18 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { rawObj: generateDeploymentYamlWithTwoContainer(), imageOverrider: &policyv1alpha1.ImageOverrider{ Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), 
Path: "/spec/template/spec/containers/1/image", Value: "registry.k8s.io/nginx:0.8", }, @@ -555,13 +555,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { Path: "/spec/template/spec/containers/0/image", }, Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/0/image", Value: "fictional.registry.example/nginx:v1.0.0", }, @@ -577,13 +577,13 @@ func TestParseJSONPatchesByImageOverrider(t *testing.T) { Path: "/spec/template/spec/containers/1/image", }, Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "nginx", }, }, want: []overrideOption{ { - Op: "replace", + Op: string(policyv1alpha1.OverriderOpReplace), Path: "/spec/template/spec/containers/1/image", Value: "registry.k8s.io/nginx:0.8", }, diff --git a/pkg/util/overridemanager/overridemanager.go b/pkg/util/overridemanager/overridemanager.go index 4663e7e36023..f6c7abbf4c39 100644 --- a/pkg/util/overridemanager/overridemanager.go +++ b/pkg/util/overridemanager/overridemanager.go @@ -19,14 +19,18 @@ package overridemanager import ( "context" "encoding/json" + "fmt" + "reflect" "sort" jsonpatch "github.com/evanphx/json-patch/v5" + "github.com/go-openapi/jsonpointer" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" @@ -280,6 +284,52 @@ func applyJSONPatch(obj *unstructured.Unstructured, overrides []overrideOption) return err } +// applyRawJSONPatch applies the override on to the given raw json object. 
+func applyRawJSONPatch(raw []byte, overrides []overrideOption) ([]byte, error) { + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return nil, err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return nil, err + } + + return patch.Apply(raw) +} + +// applyRawYAMLPatch converts the raw YAML document to JSON, applies the overrides as a JSON patch, and converts the result back to YAML. +func applyRawYAMLPatch(raw []byte, overrides []overrideOption) ([]byte, error) { + rawJSON, err := yaml.YAMLToJSON(raw) + if err != nil { + klog.ErrorS(err, "Failed to convert yaml to json") + return nil, err + } + + jsonPatchBytes, err := json.Marshal(overrides) + if err != nil { + return nil, err + } + + patch, err := jsonpatch.DecodePatch(jsonPatchBytes) + if err != nil { + return nil, err + } + + rawJSON, err = patch.Apply(rawJSON) + if err != nil { + return nil, err + } + + rawYAML, err := yaml.JSONToYAML(rawJSON) + if err != nil { + klog.ErrorS(err, "Failed to convert json to yaml") + return nil, err + } + + return rawYAML, nil +} + // applyPolicyOverriders applies OverridePolicy/ClusterOverridePolicy overriders to target object func applyPolicyOverriders(rawObj *unstructured.Unstructured, overriders policyv1alpha1.Overriders) error { err := applyImageOverriders(rawObj, overriders.ImageOverrider) @@ -300,6 +350,9 @@ func applyPolicyOverriders(rawObj *unstructured.Unstructured, overriders policyv if err := applyAnnotationsOverriders(rawObj, overriders.AnnotationsOverrider); err != nil { return err } + if err := applyFieldOverriders(rawObj, overriders.FieldOverrider); err != nil { + return err + } return applyJSONPatch(rawObj, parseJSONPatchesByPlaintext(overriders.Plaintext)) } @@ -352,6 +405,50 @@ func applyArgsOverriders(rawObj *unstructured.Unstructured, argsOverriders []pol return nil } + +// applyFieldOverriders applies FieldOverrider overriders to the target object, patching the structured document embedded in the string field each FieldPath points at. +func applyFieldOverriders(rawObj *unstructured.Unstructured, FieldOverriders []policyv1alpha1.FieldOverrider) error { + if len(FieldOverriders) == 0 { + return nil + } + for index := range FieldOverriders { + pointer, err := jsonpointer.New(FieldOverriders[index].FieldPath) + if err != nil { + klog.Errorf("Failed to build JSON pointer from overrider's path: %v", err) + return err + } + res, kind, err := pointer.Get(rawObj.Object) + if err != nil { + klog.Errorf("Failed to get value by overrider's path: %v", err) + return err + } + if kind != reflect.String { + err = fmt.Errorf("the value at overrider's path(%s) is not a string", FieldOverriders[index].FieldPath) + klog.Error(err) + return err + } + dataBytes := []byte(res.(string)) + klog.V(4).Infof("Parsed JSON patches by FieldOverriders[%d](%+v)", index, FieldOverriders[index]) + var appliedRawData []byte + if len(FieldOverriders[index].YAML) > 0 { + appliedRawData, err = applyRawYAMLPatch(dataBytes, parseYAMLPatchesByField(FieldOverriders[index].YAML)) + if err != nil { + klog.Errorf("Error applying raw YAML patch: %v", err) + return err + } + } else if len(FieldOverriders[index].JSON) > 0 { + appliedRawData, err = applyRawJSONPatch(dataBytes, parseJSONPatchesByField(FieldOverriders[index].JSON)) + if err != nil { + klog.Errorf("Error applying raw JSON patch: %v", err) + return err + } + } + _, err = pointer.Set(rawObj.Object, string(appliedRawData)) + if err != nil { + return err + } + } + return nil +} + func parseJSONPatchesByPlaintext(overriders []policyv1alpha1.PlaintextOverrider) []overrideOption { patches := make([]overrideOption, 0, len(overriders)) for i := range overriders { @@ -363,3 +460,27 @@ func parseJSONPatchesByPlaintext(overriders []policyv1alpha1.PlaintextOverrider) } return patches } + +func 
parseYAMLPatchesByField(overriders []policyv1alpha1.YAMLPatchOperation) []overrideOption { + patches := make([]overrideOption, 0, len(overriders)) + for i := range overriders { + patches = append(patches, overrideOption{ + Op: string(overriders[i].Operator), + Path: overriders[i].SubPath, + Value: overriders[i].Value, + }) + } + return patches +} + +func parseJSONPatchesByField(overriders []policyv1alpha1.JSONPatchOperation) []overrideOption { + patches := make([]overrideOption, 0, len(overriders)) + for i := range overriders { + patches = append(patches, overrideOption{ + Op: string(overriders[i].Operator), + Path: overriders[i].SubPath, + Value: overriders[i].Value, + }) + } + return patches +} diff --git a/pkg/util/overridemanager/overridemanager_test.go b/pkg/util/overridemanager/overridemanager_test.go index b70e37f8c876..ae66e6cee586 100644 --- a/pkg/util/overridemanager/overridemanager_test.go +++ b/pkg/util/overridemanager/overridemanager_test.go @@ -34,7 +34,7 @@ import ( "github.com/karmada-io/karmada/test/helper" ) -func Test_overrideManagerImpl_ApplyOverridePolicies(t *testing.T) { +func Test_overrideManagerImpl_ApplyLabelAnnotationOverriderPolicies(t *testing.T) { deployment := helper.NewDeployment(metav1.NamespaceDefault, "test1") deployment.Labels = map[string]string{ "testLabel": "testLabel", @@ -258,7 +258,7 @@ func TestGetMatchingOverridePolicies(t *testing.T) { Plaintext: []policyv1alpha1.PlaintextOverrider{ { Path: "/metadata/annotations", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: apiextensionsv1.JSON{Raw: []byte(`"foo: bar"`)}, }, }, @@ -267,7 +267,7 @@ func TestGetMatchingOverridePolicies(t *testing.T) { Plaintext: []policyv1alpha1.PlaintextOverrider{ { Path: "/metadata/annotations", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: apiextensionsv1.JSON{Raw: []byte(`"aaa: bbb"`)}, }, }, @@ -276,7 +276,7 @@ func TestGetMatchingOverridePolicies(t *testing.T) { Plaintext: []policyv1alpha1.PlaintextOverrider{ { Path: "/metadata/annotations", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: apiextensionsv1.JSON{Raw: []byte(`"hello: world"`)}, }, }, @@ -442,3 +442,242 @@ func TestGetMatchingOverridePolicies(t *testing.T) { }) } } + +func Test_overrideManagerImpl_ApplyFieldOverriderPolicies_YAML(t *testing.T) { + configmap := helper.NewConfigMap(metav1.NamespaceDefault, "test1", map[string]string{ + "test.yaml": ` +key: + key1: value +`, + }) + configmapObj, _ := utilhelper.ToUnstructured(configmap) + + type fields struct { + Client client.Client + EventRecorder record.EventRecorder + } + type args struct { + rawObj *unstructured.Unstructured + clusterName string + } + tests := []struct { + name string + fields fields + args args + wantCOP *AppliedOverrides + wantOP *AppliedOverrides + wantErr bool + }{ + { + name: "test yaml overridePolicies", + fields: fields{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(helper.NewCluster("test1"), + &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "test1", Namespace: metav1.NamespaceDefault}, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "default", + Name: "test1", + }, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"test1"}}, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: 
"/data/test.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ).Build(), + EventRecorder: &record.FakeRecorder{}, + }, + args: args{ + rawObj: configmapObj, + clusterName: "test1", + }, + wantCOP: nil, + wantOP: &AppliedOverrides{ + AppliedItems: []OverridePolicyShadow{ + { + PolicyName: "test1", + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := &overrideManagerImpl{ + Client: tt.fields.Client, + EventRecorder: tt.fields.EventRecorder, + } + gotCOP, gotOP, err := o.ApplyOverridePolicies(tt.args.rawObj, tt.args.clusterName) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyOverridePolicies() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotCOP, tt.wantCOP) { + t.Errorf("ApplyOverridePolicies() gotCOP = %v, wantCOP %v", gotCOP, tt.wantCOP) + } + if !reflect.DeepEqual(gotOP, tt.wantOP) { + t.Errorf("ApplyOverridePolicies() gotOP = %v, wantOP %v", gotOP, tt.wantOP) + } + wantData := map[string]interface{}{ + "test.yaml": `key: + key1: updated_value +`, + } + if !reflect.DeepEqual(tt.args.rawObj.Object["data"], wantData) { + t.Errorf("ApplyOverridePolicies() gotData = %v, wantData %v", tt.args.rawObj.Object["data"], wantData) + } + }) + } +} + +func Test_overrideManagerImpl_ApplyJSONOverridePolicies_JSON(t *testing.T) { + configmap := helper.NewConfigMap(metav1.NamespaceDefault, "test1", map[string]string{ + "test.json": `{"key":{"key1":"value"}}`, + }) + configmapObj, _ := utilhelper.ToUnstructured(configmap) + + type fields struct { + Client client.Client + EventRecorder record.EventRecorder + } + type args struct { + rawObj *unstructured.Unstructured + clusterName string + } + tests := []struct { + name string + fields fields + args args + wantCOP *AppliedOverrides + wantOP *AppliedOverrides + wantErr bool + }{ + { + name: "test yaml overridePolicies", + fields: fields{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).WithObjects(helper.NewCluster("test1"), + &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{Name: "test1", Namespace: metav1.NamespaceDefault}, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "default", + Name: "test1", + }, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{"test1"}}, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + ).Build(), + EventRecorder: &record.FakeRecorder{}, + }, + args: args{ + rawObj: configmapObj, + clusterName: "test1", + }, + wantCOP: nil, + wantOP: &AppliedOverrides{ + AppliedItems: []OverridePolicyShadow{ + { + 
PolicyName: "test1", + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/test.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/key/key1", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"updated_value"`)}, + }, + }, + }, + }, + }, + }, + }, + }, + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + o := &overrideManagerImpl{ + Client: tt.fields.Client, + EventRecorder: tt.fields.EventRecorder, + } + gotCOP, gotOP, err := o.ApplyOverridePolicies(tt.args.rawObj, tt.args.clusterName) + if (err != nil) != tt.wantErr { + t.Errorf("ApplyOverridePolicies() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotCOP, tt.wantCOP) { + t.Errorf("ApplyOverridePolicies() gotCOP = %v, wantCOP %v", gotCOP, tt.wantCOP) + } + if !reflect.DeepEqual(gotOP, tt.wantOP) { + t.Errorf("ApplyOverridePolicies() gotOP = %v, wantOP %v", gotOP, tt.wantOP) + } + wantData := map[string]interface{}{ + "test.json": `{"key":{"key1":"updated_value"}}`, + } + if !reflect.DeepEqual(tt.args.rawObj.Object["data"], wantData) { + t.Errorf("ApplyOverridePolicies() gotData = %v, wantData %v", tt.args.rawObj.Object["data"], wantData) + } + }) + } +} diff --git a/pkg/util/policy_test.go b/pkg/util/policy_test.go new file mode 100644 index 000000000000..a924e4e2dedc --- /dev/null +++ b/pkg/util/policy_test.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +func TestIsLazyActivationEnabled(t *testing.T) { + tests := []struct { + name string + activationPreference policyv1alpha1.ActivationPreference + expected bool + }{ + { + name: "empty activation preference", + activationPreference: "", + expected: false, + }, + { + name: "lazy activation enabled", + activationPreference: policyv1alpha1.LazyActivation, + expected: true, + }, + { + name: "different activation preference", + activationPreference: "SomeOtherPreference", + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := IsLazyActivationEnabled(tt.activationPreference) + assert.Equal(t, tt.expected, result, "unexpected result for activation preference: %s", tt.activationPreference) + }) + } +} diff --git a/pkg/util/proxy/proxy.go b/pkg/util/proxy/proxy.go index 3e75108fb84b..97370de0fcc7 100644 --- a/pkg/util/proxy/proxy.go +++ b/pkg/util/proxy/proxy.go @@ -183,8 +183,9 @@ func constructLocation(cluster *clusterapis.Cluster) (*url.URL, error) { func createProxyTransport(cluster *clusterapis.Cluster, tlsConfig *tls.Config) (*http.Transport, error) { var proxyDialerFn utilnet.DialFunc trans := utilnet.SetTransportDefaults(&http.Transport{ - DialContext: proxyDialerFn, - TLSClientConfig: tlsConfig, + DialContext: proxyDialerFn, + TLSClientConfig: tlsConfig, + DisableKeepAlives: true, }) if proxyURL := cluster.Spec.ProxyURL; proxyURL != "" { diff --git a/pkg/util/round_trippers_test.go b/pkg/util/round_trippers_test.go new file mode 100644 index 000000000000..e28ff85c1c3b --- /dev/null +++ b/pkg/util/round_trippers_test.go @@ -0,0 +1,242 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package util + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/transport" +) + +func TestNewProxyHeaderRoundTripperWrapperConstructor(t *testing.T) { + tests := []struct { + name string + wrapperFunc transport.WrapperFunc + headers map[string]string + expectedEmpty bool + expectedCount int + expectedHeader string + expectedValues []string + }{ + { + name: "nil wrapper with empty headers", + wrapperFunc: nil, + headers: nil, + expectedEmpty: true, + }, + { + name: "nil wrapper with single header", + wrapperFunc: nil, + headers: map[string]string{ + "Proxy-Authorization": "Basic xyz", + }, + expectedCount: 1, + expectedHeader: "Proxy-Authorization", + expectedValues: []string{"Basic xyz"}, + }, + { + name: "nil wrapper with multiple comma-separated values", + wrapperFunc: nil, + headers: map[string]string{ + "X-Custom-Header": "value1,value2,value3", + }, + expectedCount: 1, + expectedHeader: "X-Custom-Header", + expectedValues: []string{"value1", "value2", "value3"}, + }, + { + name: "with wrapper func", + wrapperFunc: func(rt http.RoundTripper) http.RoundTripper { + return rt + }, + headers: map[string]string{ + "Proxy-Authorization": "Basic abc", + }, + expectedCount: 1, + expectedHeader: "Proxy-Authorization", + expectedValues: []string{"Basic abc"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + wrapper := NewProxyHeaderRoundTripperWrapperConstructor(tt.wrapperFunc, tt.headers) + assert.NotNil(t, wrapper, "wrapper should not be nil") + + mockRT := &mockRoundTripper{} + rt := wrapper(mockRT) + phrt, ok := rt.(*proxyHeaderRoundTripper) + assert.True(t, ok, "should be able to cast to proxyHeaderRoundTripper") + + if tt.expectedEmpty { + assert.Empty(t, phrt.proxyHeaders, "proxy headers should be empty") + return + } + + assert.Equal(t, tt.expectedCount, len(phrt.proxyHeaders), "should have expected number of headers") + assert.Equal(t, tt.expectedValues, phrt.proxyHeaders[tt.expectedHeader], "should have expected header values") + }) + } +} + +func TestRoundTrip(t *testing.T) { + tests := []struct { + name string + roundTripper http.RoundTripper + headers map[string]string + expectedError bool + expectedStatus int + }{ + { + name: "with http transport", + roundTripper: &http.Transport{ + ProxyConnectHeader: make(http.Header), + }, + headers: map[string]string{ + "Proxy-Authorization": "Basic xyz", + }, + expectedStatus: http.StatusOK, + }, + { + name: "with custom round tripper", + roundTripper: &mockRoundTripper{ + response: &http.Response{ + StatusCode: http.StatusOK, + }, + }, + headers: map[string]string{ + "Custom-Header": "value", + }, + expectedStatus: http.StatusOK, + }, + { + name: "with error", + roundTripper: &mockRoundTripper{ + err: assert.AnError, + }, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + phrt := &proxyHeaderRoundTripper{ + proxyHeaders: parseProxyHeaders(tt.headers), + roundTripper: tt.roundTripper, + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer server.Close() + + req, err := http.NewRequest(http.MethodGet, server.URL, nil) + assert.NoError(t, err, "should create request without error") + + resp, err := phrt.RoundTrip(req) + + if tt.expectedError { + assert.Error(t, err, "should return error") + assert.Nil(t, resp, "response should be nil") + return + } + + assert.NoError(t, err, "should not return 
error") + assert.NotNil(t, resp, "response should not be nil") + assert.Equal(t, tt.expectedStatus, resp.StatusCode, "should have expected status code") + }) + } +} + +func TestParseProxyHeaders(t *testing.T) { + tests := []struct { + name string + headers map[string]string + expectedEmpty bool + expectedCount int + expectedHeader string + expectedValues []string + }{ + { + name: "nil headers", + headers: nil, + expectedEmpty: true, + }, + { + name: "empty headers", + headers: map[string]string{}, + expectedEmpty: true, + }, + { + name: "single header", + headers: map[string]string{ + "proxy-authorization": "Basic xyz", + }, + expectedCount: 1, + expectedHeader: "Proxy-Authorization", + expectedValues: []string{"Basic xyz"}, + }, + { + name: "multiple comma-separated values", + headers: map[string]string{ + "x-custom-header": "value1,value2,value3", + }, + expectedCount: 1, + expectedHeader: "X-Custom-Header", + expectedValues: []string{"value1", "value2", "value3"}, + }, + { + name: "multiple headers", + headers: map[string]string{ + "proxy-authorization": "Basic xyz", + "x-custom-header": "value1,value2", + }, + expectedCount: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := parseProxyHeaders(tt.headers) + + if tt.expectedEmpty { + assert.Nil(t, result, "headers should be nil") + return + } + + assert.Equal(t, tt.expectedCount, len(result), "should have expected number of headers") + + if tt.expectedHeader != "" { + assert.Equal(t, tt.expectedValues, result[tt.expectedHeader], "should have expected header values") + } + }) + } +} + +// Mock Implementations + +type mockRoundTripper struct { + response *http.Response + err error +} + +func (m *mockRoundTripper) RoundTrip(_ *http.Request) (*http.Response, error) { + return m.response, m.err +} diff --git a/pkg/util/secret.go b/pkg/util/secret.go index ab05bccfeff8..c4b93dc417e1 100644 --- a/pkg/util/secret.go +++ b/pkg/util/secret.go @@ -60,3 +60,12 @@ func PatchSecret(client kubeclient.Interface, namespace, name string, pt types.P } return nil } + +// DeleteSecret just try to delete the Secret. +func DeleteSecret(client kubeclient.Interface, namespace, name string) error { + err := client.CoreV1().Secrets(namespace).Delete(context.Background(), name, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return err + } + return nil +} diff --git a/pkg/util/validation/validation.go b/pkg/util/validation/validation.go index 084cae0237c4..0fdb9861d6a8 100644 --- a/pkg/util/validation/validation.go +++ b/pkg/util/validation/validation.go @@ -18,9 +18,10 @@ package validation import ( "fmt" - "strings" + "github.com/go-openapi/jsonpointer" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apivalidation "k8s.io/apimachinery/pkg/api/validation" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/util/validation" @@ -31,9 +32,6 @@ import ( "github.com/karmada-io/karmada/pkg/util" ) -// LabelValueMaxLength is a label's max length -const LabelValueMaxLength int = 63 - // ValidatePropagationSpec validates a PropagationSpec before creation or update. func ValidatePropagationSpec(spec policyv1alpha1.PropagationSpec) field.ErrorList { var allErrs field.ErrorList @@ -307,9 +305,27 @@ func ValidateOverrideRules(overrideRules []policyv1alpha1.RuleWithCluster, fldPa // validates predicate path. 
for imageIndex, image := range rule.Overriders.ImageOverrider { imagePath := rulePath.Child("overriders").Child("imageOverrider").Index(imageIndex) - if image.Predicate != nil && !strings.HasPrefix(image.Predicate.Path, "/") { - allErrs = append(allErrs, field.Invalid(imagePath.Child("predicate").Child("path"), image.Predicate.Path, "path should be start with / character")) + if image.Predicate != nil { + if _, err := jsonpointer.New(image.Predicate.Path); err != nil { + allErrs = append(allErrs, field.Invalid(imagePath.Child("predicate").Child("path"), image.Predicate.Path, err.Error())) + } + } + } + + for fieldIndex, fieldOverrider := range rule.Overriders.FieldOverrider { + fieldPath := rulePath.Child("overriders").Child("fieldOverrider").Index(fieldIndex) + // validates that either YAML or JSON is selected for each field overrider. + if len(fieldOverrider.YAML) > 0 && len(fieldOverrider.JSON) > 0 { + allErrs = append(allErrs, field.Invalid(fieldPath, fieldOverrider, "FieldOverrider has both YAML and JSON set. Only one is allowed")) + } + // validates the field path. + if _, err := jsonpointer.New(fieldOverrider.FieldPath); err != nil { + allErrs = append(allErrs, field.Invalid(fieldPath.Child("fieldPath"), fieldOverrider.FieldPath, err.Error())) } + // validates the JSON patch operations sub path. + allErrs = append(allErrs, validateJSONPatchSubPaths(fieldOverrider.JSON, fieldPath.Child("json"))...) + // validates the YAML patch operations sub path. + allErrs = append(allErrs, validateYAMLPatchSubPaths(fieldOverrider.YAML, fieldPath.Child("yaml"))...) } // validates the targetCluster. @@ -317,3 +333,42 @@ func ValidateOverrideRules(overrideRules []policyv1alpha1.RuleWithCluster, fldPa } return allErrs } + +func validateJSONPatchSubPaths(patches []policyv1alpha1.JSONPatchOperation, fieldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for index, patch := range patches { + patchPath := fieldPath.Index(index) + if _, err := jsonpointer.New(patch.SubPath); err != nil { + allErrs = append(allErrs, field.Invalid(patchPath.Child("subPath"), patch.SubPath, err.Error())) + } + allErrs = append(allErrs, validateOverrideOperator(patch.Operator, patch.Value, patchPath.Child("value"))...) + } + return allErrs +} + +func validateYAMLPatchSubPaths(patches []policyv1alpha1.YAMLPatchOperation, fieldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + for index, patch := range patches { + patchPath := fieldPath.Index(index) + if _, err := jsonpointer.New(patch.SubPath); err != nil { + allErrs = append(allErrs, field.Invalid(patchPath.Child("subPath"), patch.SubPath, err.Error())) + } + allErrs = append(allErrs, validateOverrideOperator(patch.Operator, patch.Value, patchPath.Child("value"))...) 
+ } + return allErrs +} + +func validateOverrideOperator(operator policyv1alpha1.OverriderOperator, value apiextensionsv1.JSON, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + switch operator { + case policyv1alpha1.OverriderOpAdd, policyv1alpha1.OverriderOpReplace: + if value.Size() == 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, "value is required for add or replace operation")) + } + case policyv1alpha1.OverriderOpRemove: + if value.Size() != 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value, "value is not allowed for remove operation")) + } + } + return allErrs +} diff --git a/pkg/util/validation/validation_test.go b/pkg/util/validation/validation_test.go index f5aec8aec3f6..b1046e4ac458 100644 --- a/pkg/util/validation/validation_test.go +++ b/pkg/util/validation/validation_test.go @@ -133,7 +133,7 @@ func TestValidateOverrideSpec(t *testing.T) { Overriders: policyv1alpha1.Overriders{ AnnotationsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ { - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: map[string]string{"testannotation~projectId": "c-m-lfx9lk92p-v86cf"}, }, }, @@ -154,7 +154,7 @@ func TestValidateOverrideSpec(t *testing.T) { Overriders: policyv1alpha1.Overriders{ LabelsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ { - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: map[string]string{"testannotation~projectId": "c-m-lfx9lk92p-v86cf"}, }, }, @@ -277,14 +277,14 @@ func TestEmptyOverrides(t *testing.T) { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: "fictional.registry.us", }, }, CommandOverrider: []policyv1alpha1.CommandArgsOverrider{ { ContainerName: "nginx", - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: []string{"echo 'hello karmada'"}, }, }, diff --git a/pkg/webhook/clusteroverridepolicy/validating_test.go b/pkg/webhook/clusteroverridepolicy/validating_test.go new file mode 100644 index 000000000000..d0e96b869e3f --- /dev/null +++ b/pkg/webhook/clusteroverridepolicy/validating_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusteroverridepolicy + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. 
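+// It returns the configured error if set; otherwise it copies the prepared object into obj via reflection, mimicking a successful decode.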
+func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_ValidationClusterOverrideSpecFails_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterOverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + LabelsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ + { + Operator: policyv1alpha1.OverriderOpAdd, + Value: map[string]string{"testannotation~projectId": "c-m-lfx9lk92p-v86cf"}, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"testannotation~projectId\"", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterOverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + Plaintext: []policyv1alpha1.PlaintextOverrider{ + { + Path: "/spec/optional", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. 
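+// Allowed responses carry no failure message, so the empty string is returned for them.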
+func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/clusterpropagationpolicy/mutating.go b/pkg/webhook/clusterpropagationpolicy/mutating.go index 2905637d1e3b..cc8e33501416 100644 --- a/pkg/webhook/clusterpropagationpolicy/mutating.go +++ b/pkg/webhook/clusterpropagationpolicy/mutating.go @@ -19,18 +19,17 @@ package clusterpropagationpolicy import ( "context" "encoding/json" - "fmt" "net/http" "github.com/google/uuid" admissionv1 "k8s.io/api/admission/v1" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/helper" - "github.com/karmada-io/karmada/pkg/util/validation" ) // MutatingAdmission mutates API request if necessary. @@ -61,16 +60,13 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm if err != nil { return admission.Errored(http.StatusBadRequest, err) } + klog.V(2).Infof("Mutating ClusterPropagationPolicy(%s/%s) for request: %s", req.Namespace, policy.Name, req.Operation) // Set default spread constraints if both 'SpreadByField' and 'SpreadByLabel' not set. helper.SetDefaultSpreadConstraints(policy.Spec.Placement.SpreadConstraints) helper.AddTolerations(&policy.Spec.Placement, helper.NewNotReadyToleration(a.DefaultNotReadyTolerationSeconds), helper.NewUnreachableToleration(a.DefaultUnreachableTolerationSeconds)) - if len(policy.Name) > validation.LabelValueMaxLength { - return admission.Errored(http.StatusBadRequest, fmt.Errorf("ClusterPropagationPolicy's name should be no more than %d characters", validation.LabelValueMaxLength)) - } - if helper.ContainsServiceImport(policy.Spec.ResourceSelectors) { policy.Spec.PropagateDeps = true } diff --git a/pkg/webhook/clusterpropagationpolicy/mutating_test.go b/pkg/webhook/clusterpropagationpolicy/mutating_test.go new file mode 100644 index 000000000000..f07a7053acf9 --- /dev/null +++ b/pkg/webhook/clusterpropagationpolicy/mutating_test.go @@ -0,0 +1,222 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clusterpropagationpolicy + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +var ( + notReadyTolerationSeconds int64 = 300 + unreachableTolerationSeconds int64 = 300 + failOverGracePeriodSeconds int32 = 600 +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := NewMutatingHandler( + notReadyTolerationSeconds, unreachableTolerationSeconds, tt.decoder, + ) + got := v.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the cp policy name to be used in the test. + policyName := "test-cp-policy" + + // Mock admission request with no specific namespace. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + } + + // Create the initial cp policy with default values for testing. + cpPolicy := &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: policyv1alpha1.PropagationSpec{ + Placement: policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByLabel: "", SpreadByField: "", MinGroups: 0}, + }, + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + }, + }, + PropagateDeps: false, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + Kind: util.ServiceImportKind, + APIVersion: mcsv1alpha1.GroupVersion.String(), + }, + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{ + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: nil, + }, + }, + }, + } + + // Define the expected cp policy after mutations. 
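+	// The handler is expected to default the empty spread constraint to SpreadByFieldCluster with MinGroups 1, set the weighted replica division preference, append the not-ready and unreachable tolerations, enable PropagateDeps for the ServiceImport selector, fill in the failover grace period, and attach the permanent-ID label and controller finalizer.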
+ wantCPPolicy := &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "some-unique-uuid", + }, + Finalizers: []string{util.ClusterPropagationPolicyControllerFinalizer}, + }, + Spec: policyv1alpha1.PropagationSpec{ + Placement: policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + { + SpreadByField: policyv1alpha1.SpreadByFieldCluster, + MinGroups: 1, + }, + }, + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + }, + ClusterTolerations: []corev1.Toleration{ + *helper.NewNotReadyToleration(notReadyTolerationSeconds), + *helper.NewUnreachableToleration(unreachableTolerationSeconds), + }, + }, + PropagateDeps: true, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + Kind: util.ServiceImportKind, + APIVersion: mcsv1alpha1.GroupVersion.String(), + }, + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{ + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: ptr.To[int32](failOverGracePeriodSeconds), + }, + }, + }, + } + + // Mock decoder that decodes the request into the cp policy object. + decoder := &fakeMutationDecoder{ + obj: cpPolicy, + } + + // Marshal the expected cp policy to simulate the final mutated object. + wantBytes, err := json.Marshal(wantCPPolicy) + if err != nil { + t.Fatalf("Failed to marshal expected cp policy: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := NewMutatingHandler( + notReadyTolerationSeconds, unreachableTolerationSeconds, decoder, + ) + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if exactly one patch is applied. + if len(got.Patches) != 1 { + t.Errorf("Handle() returned an unexpected number of patches. Expected one patch, received: %v", got.Patches) + } + + // Verify that the only patch applied is for the UUID label. + // If any other patches are present, it indicates that the cp policy was not handled as expected. + firstPatch := got.Patches[0] + if firstPatch.Operation != "replace" || firstPatch.Path != "/metadata/labels/clusterpropagationpolicy.karmada.io~1permanent-id" { + t.Errorf("Handle() returned unexpected patches. Only the UUID patch was expected. Received patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/clusterpropagationpolicy/validating_test.go b/pkg/webhook/clusterpropagationpolicy/validating_test.go new file mode 100644 index 000000000000..6cf729db1bbf --- /dev/null +++ b/pkg/webhook/clusterpropagationpolicy/validating_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterpropagationpolicy + +import ( + "context" + "errors" + "fmt" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/scheduler" +) + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(rawObject runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(rawObject.Object).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + { + name: "Handle_SchedulerNameUpdated_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterPropagationPolicy{ + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: "new-scheduler", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &policyv1alpha1.ClusterPropagationPolicy{ + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: scheduler.DefaultScheduler, + }, + }, + }, + }, + }, + want: admission.Denied("the schedulerName should not be updated"), + }, + { + name: "Handle_PermanentIDLabelUpdated_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "new-id", + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "old-id", + }, + }, + }, + }, + }, + }, + want: admission.Denied(fmt.Sprintf("label %s is immutable, it can only be set by the system "+ + "during creation", policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel)), + }, + { + name: "Handle_PermanentIDLabelMissing_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: admission.Denied(fmt.Sprintf("label %s is required, it should be set by the mutating "+ + "admission webhook during 
creation", policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel)), + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.ClusterPropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.ClusterPropagationPolicyPermanentIDLabel: "new-id", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: scheduler.DefaultScheduler, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: admission.Allowed(""), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/webhook/clusterresourcebinding/mutating_test.go b/pkg/webhook/clusterresourcebinding/mutating_test.go new file mode 100644 index 000000000000..c94403972d23 --- /dev/null +++ b/pkg/webhook/clusterresourcebinding/mutating_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clusterresourcebinding + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
+func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the crb object name and namespace to be used in the test. + name := "test-cluster-resource-binding" + namespace := "test-namespace" + podName := "test-pod" + + // Mock an admission request. + req := admission.Request{} + + // Create the initial crb object with default values for testing. + crb := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: namespace, + Name: podName, + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 1, + }, + }, + }, + } + + // Define the expected crb object after mutations. + wantCRB := &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + workv1alpha2.ClusterResourceBindingPermanentIDLabel: "some-unique-uuid", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: namespace, + Name: podName, + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 1, + }, + }, + }, + } + + // Mock decoder that decodes the request into the crb object. + decoder := &fakeMutationDecoder{ + obj: crb, + } + + // Marshal the expected crb object to simulate the final mutated object. + wantBytes, err := json.Marshal(wantCRB) + if err != nil { + t.Fatalf("Failed to marshal expected crb object: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if exactly one patch is applied. + if len(got.Patches) != 1 { + t.Errorf("Handle() returned an unexpected number of patches. Expected one patch, received: %v", got.Patches) + } + + // Verify that the patches are for the UUID label. + // If any other patches are present, it indicates that the crb object was not handled as expected. + firstPatch := got.Patches[0] + if firstPatch.Operation != "replace" || firstPatch.Path != "/metadata/labels/clusterresourcebinding.karmada.io~1permanent-id" { + t.Errorf("Handle() returned unexpected patches. Only the UUID patch was expected. Received patches: %v", got.Patches) + } + + // Check if the admission request was allowed. 
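+	// A response that carries JSON patches is still allowed; a denial here would indicate that decoding or mutation failed.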
+ if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/federatedhpa/mutating_test.go b/pkg/webhook/federatedhpa/mutating_test.go new file mode 100644 index 000000000000..b43d5a0487a6 --- /dev/null +++ b/pkg/webhook/federatedhpa/mutating_test.go @@ -0,0 +1,214 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedhpa + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + autoscalingv2 "k8s.io/api/autoscaling/v2" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" + "github.com/karmada-io/karmada/pkg/util/lifted" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the federatedhpa object name and namespace to be used in the test. + name := "test-app" + namespace := "test-namespace" + + // Mock an admission request. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + } + + // Create the initial federatedhpa with default values for testing.
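+	// MinReplicas, Behavior, and Metrics are deliberately left empty so the webhook's defaulting logic has to populate them.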
+ federatedHPAObj := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name, + }, + MinReplicas: nil, + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{}, + Metrics: []autoscalingv2.MetricSpec{}, + }, + } + + // Define the expected federatedhpa object after mutations. + wantFederatedHPAObj := &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: name, + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + PeriodSeconds: 15, + }, + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: nil, + SelectPolicy: ptr.To[autoscalingv2.ScalingPolicySelect](autoscalingv2.MaxChangePolicySelect), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + PeriodSeconds: 15, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.ResourceMetricSourceType, + Resource: &autoscalingv2.ResourceMetricSource{ + Name: corev1.ResourceCPU, + Target: autoscalingv2.MetricTarget{ + Type: autoscalingv2.UtilizationMetricType, + AverageUtilization: ptr.To[int32](lifted.DefaultCPUUtilization), + }, + }, + }, + }, + }, + } + + // Mock decoder that decodes the request into the federatedhpa object. + decoder := &fakeMutationDecoder{ + obj: federatedHPAObj, + } + + // Marshal the expected federatedhpa object to simulate the final mutated object. + wantBytes, err := json.Marshal(wantFederatedHPAObj) + if err != nil { + t.Fatalf("Failed to marshal expected federatedHPA object: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if there are any patches applied. There should be no patches if the federatedhpa object is handled correctly. + if len(got.Patches) > 0 { + t.Errorf("Handle() returned patches, but no patches were expected. Got patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/federatedhpa/validating_test.go b/pkg/webhook/federatedhpa/validating_test.go new file mode 100644 index 000000000000..155a03b0c277 --- /dev/null +++ b/pkg/webhook/federatedhpa/validating_test.go @@ -0,0 +1,276 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package federatedhpa + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + autoscalingv2 "k8s.io/api/autoscaling/v2" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + autoscalingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/autoscaling/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_ValidationFederatedHPASpecFails_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-app", + }, + MinReplicas: ptr.To[int32](0), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + }, + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "http_requests", + }, + 
Target: autoscalingv2.MetricTarget{ + AverageValue: resource.NewMilliQuantity(700, resource.DecimalSI), + Type: autoscalingv2.ValueMetricType, + }, + }, + }, + }, + }, + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 2, + DesiredReplicas: 2, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.minReplicas: Invalid value: 0", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &autoscalingv1alpha1.FederatedHPA{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-app", + Namespace: "test-namespace", + }, + Spec: autoscalingv1alpha1.FederatedHPASpec{ + ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-app", + }, + MinReplicas: ptr.To[int32](1), + MaxReplicas: 10, + Behavior: &autoscalingv2.HorizontalPodAutoscalerBehavior{ + ScaleUp: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](0), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PodsScalingPolicy, + Value: 4, + }, + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + ScaleDown: &autoscalingv2.HPAScalingRules{ + StabilizationWindowSeconds: ptr.To[int32](300), + Policies: []autoscalingv2.HPAScalingPolicy{ + { + PeriodSeconds: 15, + Type: autoscalingv2.PercentScalingPolicy, + Value: 100, + }, + }, + }, + }, + Metrics: []autoscalingv2.MetricSpec{ + { + Type: autoscalingv2.PodsMetricSourceType, + Pods: &autoscalingv2.PodsMetricSource{ + Metric: autoscalingv2.MetricIdentifier{ + Name: "http_requests", + }, + Target: autoscalingv2.MetricTarget{ + AverageValue: resource.NewMilliQuantity(700, resource.DecimalSI), + Type: autoscalingv2.ValueMetricType, + }, + }, + }, + }, + }, + Status: autoscalingv2.HorizontalPodAutoscalerStatus{ + CurrentReplicas: 2, + DesiredReplicas: 2, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. 
+func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/federatedresourcequota/validating_test.go b/pkg/webhook/federatedresourcequota/validating_test.go index 41134c8efdce..a1360b6ea367 100644 --- a/pkg/webhook/federatedresourcequota/validating_test.go +++ b/pkg/webhook/federatedresourcequota/validating_test.go @@ -36,13 +36,28 @@ import ( policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" ) -type fakeDecoder struct { +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { err error obj runtime.Object } // Decode mocks the Decode method of admission.Decoder. -func (f *fakeDecoder) Decode(_ admission.Request, obj runtime.Object) error { +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { if f.err != nil { return f.err } @@ -53,7 +68,7 @@ func (f *fakeDecoder) Decode(_ admission.Request, obj runtime.Object) error { } // DecodeRaw mocks the DecodeRaw method of admission.Decoder. -func (f *fakeDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { +func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { if f.err != nil { return f.err } @@ -63,10 +78,12 @@ func (f *fakeDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) erro return nil } -// sortAndJoinMessages sorts and joins error message parts to ensure consistent ordering. +// normalizedMessages trims the square brackets and sorts the error message parts to ensure consistent ordering. // This prevents test flakiness caused by varying error message order.
-func sortAndJoinMessages(message string) string { - parts := strings.Split(strings.Trim(message, "[]"), ", ") +func normalizedMessages(message string) string { + message = strings.TrimLeft(message, "[") + message = strings.TrimRight(message, "]") + parts := strings.Split(message, ", ") sort.Strings(parts) return strings.Join(parts, ", ") } @@ -86,7 +103,7 @@ func Test_validateOverallAndAssignments(t *testing.T) { want field.ErrorList }{ { - "normal", + "validateOverallAndAssignments_ValidValues_NoErrors", args{ quotaSpec: &policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -115,7 +132,7 @@ func Test_validateOverallAndAssignments(t *testing.T) { field.ErrorList{}, }, { - "overall[cpu] is less than assignments", + "validateOverallAndAssignments_LowerOverallCPUThanAssignments_OverallCPULessThanAssignments", args{ quotaSpec: &policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -146,7 +163,7 @@ func Test_validateOverallAndAssignments(t *testing.T) { }, }, { - "overall[memory] is less than assignments", + "validateOverallAndAssignments_LowerOverallMemoryThanAssignments_OverallMemoryLessThanAssignments", args{ quotaSpec: &policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -177,7 +194,7 @@ func Test_validateOverallAndAssignments(t *testing.T) { }, }, { - "assignment resourceName is not exist in overall", + "validateOverallAndAssignments_InvalidAssignmentResource_AssignmentResourceNotInOverall", args{ quotaSpec: &policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -222,19 +239,22 @@ func TestValidatingAdmission_Handle(t *testing.T) { name string decoder admission.Decoder req admission.Request - want admission.Response + want TestResponse }{ { - name: "Decode Error Handling", - decoder: &fakeDecoder{ + name: "Handle_DecodeErrorHandling_ErrorReturned", + decoder: &fakeValidationDecoder{ err: errors.New("decode error"), }, - req: admission.Request{}, - want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, }, { - name: "Validation Success - Resource Limits Match", - decoder: &fakeDecoder{ + name: "Handle_ResourceLimitsMatch_NoErrors", + decoder: &fakeValidationDecoder{ obj: &policyv1alpha1.FederatedResourceQuota{ Spec: policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -260,12 +280,15 @@ func TestValidatingAdmission_Handle(t *testing.T) { }, }, }, - req: admission.Request{}, - want: admission.Allowed(""), + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, }, { - name: "Validation Error - Resource Limits Exceeded", - decoder: &fakeDecoder{ + name: "Handle_ExceedResourceLimits_ResourceLimitsExceeded", + decoder: &fakeValidationDecoder{ obj: &policyv1alpha1.FederatedResourceQuota{ Spec: policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -291,12 +314,15 @@ func TestValidatingAdmission_Handle(t *testing.T) { }, }, }, - req: admission.Request{}, - want: admission.Denied(fmt.Sprintf("[spec.overall[cpu]: Invalid value: \"%s\": overall is less than assignments, spec.overall[memory]: Invalid value: \"%s\": overall is less than assignments]", "5", "5Gi")), + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: fmt.Sprintf("spec.overall[cpu]: Invalid value: \"%s\": overall is less than assignments, spec.overall[memory]: Invalid value: \"%s\": overall is less than assignments", "5", "5Gi"), + 
}, }, { - name: "Validation Error - CPU Allocation Exceeds Overall Limit", - decoder: &fakeDecoder{ + name: "Handle_ExceedCPUAllocationOverallLimit_CPUAllocationExceedsOverallLimit", + decoder: &fakeValidationDecoder{ obj: &policyv1alpha1.FederatedResourceQuota{ Spec: policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -322,12 +348,15 @@ func TestValidatingAdmission_Handle(t *testing.T) { }, }, }, - req: admission.Request{}, - want: admission.Denied(fmt.Sprintf("spec.overall[cpu]: Invalid value: \"%s\": overall is less than assignments", "5")), + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: fmt.Sprintf("spec.overall[cpu]: Invalid value: \"%s\": overall is less than assignments", "5"), + }, }, { - name: "Validation Error - Memory Allocation Exceeds Overall Limit", - decoder: &fakeDecoder{ + name: "Handle_ExceedMemoryAllocationOverallLimit_MemoryAllocationExceedsOverallLimit", + decoder: &fakeValidationDecoder{ obj: &policyv1alpha1.FederatedResourceQuota{ Spec: policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -353,12 +382,15 @@ func TestValidatingAdmission_Handle(t *testing.T) { }, }, }, - req: admission.Request{}, - want: admission.Denied(fmt.Sprintf("spec.overall[memory]: Invalid value: \"%s\": overall is less than assignments", "5Gi")), + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: fmt.Sprintf("spec.overall[memory]: Invalid value: \"%s\": overall is less than assignments", "5Gi"), + }, }, { - name: "Invalid Cluster Name", - decoder: &fakeDecoder{ + name: "Handle_InvalidClusterName_ClusterNameInvalid", + decoder: &fakeValidationDecoder{ obj: &policyv1alpha1.FederatedResourceQuota{ Spec: policyv1alpha1.FederatedResourceQuotaSpec{ Overall: corev1.ResourceList{ @@ -377,8 +409,11 @@ func TestValidatingAdmission_Handle(t *testing.T) { }, }, }, - req: admission.Request{}, - want: admission.Denied("[spec.staticAssignments[0].clusterName: Invalid value: \"invalid cluster name\": a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')]"), + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"invalid cluster name\"", + }, }, } @@ -388,10 +423,15 @@ func TestValidatingAdmission_Handle(t *testing.T) { Decoder: tt.decoder, } got := v.Handle(context.Background(), tt.req) - got.Result.Message = sortAndJoinMessages(got.Result.Message) - tt.want.Result.Message = sortAndJoinMessages(tt.want.Result.Message) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Handle() = %v, want %v", got, tt.want) + got.Result.Message = normalizedMessages(got.Result.Message) + tt.want.Message = normalizedMessages(tt.want.Message) + + // Extract type and message from the actual response. 
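+	// Comparing the response type plus a normalized message substring keeps the assertion stable when aggregate errors change order.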
+ gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) } }) } @@ -406,7 +446,7 @@ func Test_validateFederatedResourceQuotaStatus(t *testing.T) { expected field.ErrorList }{ { - name: "Valid FederatedResourceQuotaStatus", + name: "validateFederatedResourceQuotaStatus_ValidStatus_NoErrors", status: &policyv1alpha1.FederatedResourceQuotaStatus{ Overall: corev1.ResourceList{"cpu": resource.MustParse("10")}, OverallUsed: corev1.ResourceList{"cpu": resource.MustParse("5")}, @@ -423,7 +463,7 @@ func Test_validateFederatedResourceQuotaStatus(t *testing.T) { expected: field.ErrorList{}, }, { - name: "Invalid Overall Resource List", + name: "validateFederatedResourceQuotaStatus_InvalidOverallResourceList_ResourceTypeInvalid", status: &policyv1alpha1.FederatedResourceQuotaStatus{ Overall: corev1.ResourceList{"invalid-resource": resource.MustParse("10")}, OverallUsed: corev1.ResourceList{"cpu": resource.MustParse("5")}, @@ -443,7 +483,7 @@ func Test_validateFederatedResourceQuotaStatus(t *testing.T) { }, }, { - name: "Invalid AggregatedStatus Resource List", + name: "validateFederatedResourceQuotaStatus_InvalidAggregatedStatusResourceList_ResourceTypeInvalid", status: &policyv1alpha1.FederatedResourceQuotaStatus{ Overall: corev1.ResourceList{"cpu": resource.MustParse("10")}, OverallUsed: corev1.ResourceList{"cpu": resource.MustParse("5")}, @@ -482,7 +522,7 @@ func Test_validateClusterQuotaStatus(t *testing.T) { expected field.ErrorList }{ { - name: "Valid ClusterQuotaStatus", + name: "validateClusterQuotaStatus_ValidClusterQuotaStatus_NoErrors", status: &policyv1alpha1.ClusterQuotaStatus{ ClusterName: "valid-cluster", ResourceQuotaStatus: corev1.ResourceQuotaStatus{ @@ -493,7 +533,7 @@ func Test_validateClusterQuotaStatus(t *testing.T) { expected: field.ErrorList{}, }, { - name: "Invalid Cluster Name", + name: "validateClusterQuotaStatus_InvalidClusterName_ClusterNameInvalid", status: &policyv1alpha1.ClusterQuotaStatus{ ClusterName: "invalid cluster name", ResourceQuotaStatus: corev1.ResourceQuotaStatus{ @@ -506,7 +546,7 @@ func Test_validateClusterQuotaStatus(t *testing.T) { }, }, { - name: "Invalid Resource List - Hard", + name: "validateClusterQuotaStatus_InvalidResourceList_HardResourceInvalid", status: &policyv1alpha1.ClusterQuotaStatus{ ClusterName: "valid-cluster", ResourceQuotaStatus: corev1.ResourceQuotaStatus{ @@ -520,7 +560,7 @@ func Test_validateClusterQuotaStatus(t *testing.T) { }, }, { - name: "Invalid Resource List - Used", + name: "validateClusterQuotaStatus_InvalidResourceList_UsedResourceInvalid", status: &policyv1alpha1.ClusterQuotaStatus{ ClusterName: "valid-cluster", ResourceQuotaStatus: corev1.ResourceQuotaStatus{ @@ -543,3 +583,24 @@ func Test_validateClusterQuotaStatus(t *testing.T) { }) } } + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. 
+func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/interpreter/decode.go b/pkg/webhook/interpreter/decode.go index f10c5a416489..ea4bce532967 100644 --- a/pkg/webhook/interpreter/decode.go +++ b/pkg/webhook/interpreter/decode.go @@ -43,7 +43,7 @@ func NewDecoder(scheme *runtime.Scheme) *Decoder { // It errors out if req.Object.Raw is empty i.e. containing 0 raw bytes. func (d *Decoder) Decode(req Request, into runtime.Object) error { if len(req.Object.Raw) == 0 { - return fmt.Errorf("there is no context to decode") + return fmt.Errorf("there is no content to decode") } return d.DecodeRaw(req.Object, into) } diff --git a/pkg/webhook/interpreter/decode_test.go b/pkg/webhook/interpreter/decode_test.go new file mode 100644 index 000000000000..9b3580389056 --- /dev/null +++ b/pkg/webhook/interpreter/decode_test.go @@ -0,0 +1,277 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "fmt" + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +type Interface interface { + GetAPIVersion() string + GetKind() string + GetName() string +} + +// MyTestPod represents a simplified version of a Kubernetes Pod for testing purposes. +// It includes basic fields such as API version, kind, and metadata. +type MyTestPod struct { + APIVersion string `json:"apiVersion"` + Kind string `json:"kind"` + Metadata struct { + Name string `json:"name"` + } `json:"metadata"` +} + +// DeepCopyObject creates a deep copy of the MyTestPod instance. +// This method is part of the runtime.Object interface and ensures that modifications +// to the copy do not affect the original object. +func (p *MyTestPod) DeepCopyObject() runtime.Object { + return &MyTestPod{ + APIVersion: p.APIVersion, + Kind: p.Kind, + Metadata: p.Metadata, + } +} + +// GetObjectKind returns the schema.ObjectKind for the MyTestPod instance. +// This method is part of the runtime.Object interface and provides the API version +// and kind of the object, which is used for object identification in Kubernetes. +func (p *MyTestPod) GetObjectKind() schema.ObjectKind { + return &metav1.TypeMeta{ + APIVersion: p.APIVersion, + Kind: p.Kind, + } +} + +// GetAPIVersion returns the API version of the MyTestPod. +func (p *MyTestPod) GetAPIVersion() string { + return p.APIVersion +} + +// GetKind returns the kind of the MyTestPod. +func (p *MyTestPod) GetKind() string { + return p.Kind +} + +// GetName returns the name of the MyTestPod. 
+func (p *MyTestPod) GetName() string { + return p.Metadata.Name +} + +func TestNewDecoder(t *testing.T) { + tests := []struct { + name string + }{ + { + name: "NewDecoder_ValidDecoder_DecoderIsValid", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + if decoder == nil { + t.Errorf("expected decoder to not be nil") + } + }) + } +} + +func TestDecodeRaw(t *testing.T) { + tests := []struct { + name string + apiVersion string + kind string + objName string + rawObj *runtime.RawExtension + into Interface + prep func(re *runtime.RawExtension, apiVersion, kind, name string) error + verify func(into Interface, apiVersion, kind, name string) error + wantErr bool + errMsg string + }{ + { + name: "DecodeRaw_ValidRaw_DecodeRawIsSuccessful", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &unstructured.Unstructured{}, + prep: func(re *runtime.RawExtension, apiVersion, kind, name string) error { + re.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "DecodeRaw_IntoNonUnstructuredType_RawDecoded", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &MyTestPod{}, + prep: func(re *runtime.RawExtension, apiVersion, kind, name string) error { + re.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "DecodeRaw_EmptyRaw_NoContentToDecode", + rawObj: &runtime.RawExtension{ + Raw: []byte{}, + }, + into: &unstructured.Unstructured{}, + prep: func(*runtime.RawExtension, string, string, string) error { return nil }, + verify: func(Interface, string, string, string) error { return nil }, + wantErr: true, + errMsg: "there is no content to decode", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.rawObj, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to prep the runtime raw extension object: %v", err) + } + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + intoObj, ok := test.into.(runtime.Object) + if !ok { + t.Errorf("failed to type assert the into object to runtime.Object") + } + err := decoder.DecodeRaw(*test.rawObj, intoObj) + if err != nil && !test.wantErr { + t.Errorf("unexpected error while decoding the raw: %v", err) + } + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.into, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to verify decoding the raw: %v", err) + } + }) + } +} + +func TestDecode(t *testing.T) { + tests := []struct { + name string + apiVersion string + kind string + objName string + req *Request + into Interface + prep func(re *Request, apiVersion, kind, name string) error + verify func(into Interface, apiVersion, kind, name string) error + wantErr bool + errMsg string + }{ + { + name: "Decode_ValidRequest_DecodeRequestIsSuccessful", + objName: "test-pod", + kind: "Pod", + apiVersion: "v1", + req: &Request{
ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + Object: runtime.RawExtension{}, + }, + }, + into: &unstructured.Unstructured{}, + prep: func(re *Request, apiVersion, kind, name string) error { + re.ResourceInterpreterRequest.Object.Raw = []byte(fmt.Sprintf(`{"apiVersion": "%s", "kind": "%s", "metadata": {"name": "%s"}}`, apiVersion, kind, name)) + return nil + }, + verify: verifyRuntimeObject, + wantErr: false, + }, + { + name: "Decode_EmptyRaw_NoContentToDecode", + req: &Request{ + ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{ + Object: runtime.RawExtension{}, + }, + }, + into: &unstructured.Unstructured{}, + prep: func(*Request, string, string, string) error { return nil }, + verify: func(Interface, string, string, string) error { return nil }, + wantErr: true, + errMsg: "there is no content to decode", + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if err := test.prep(test.req, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to prep the request object: %v", err) + } + scheme := runtime.NewScheme() + decoder := NewDecoder(scheme) + if decoder == nil { + t.Errorf("expected decoder to not be nil") + } + intoObj, ok := test.into.(runtime.Object) + if !ok { + t.Errorf("failed to type assert the into object to runtime.Object") + } + err := decoder.Decode(*test.req, intoObj) + if err != nil && !test.wantErr { + t.Errorf("unexpected error while decoding the raw: %v", err) + } + if err == nil && test.wantErr { + t.Errorf("expected an error, but got none") + } + if err != nil && test.wantErr && !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected %s error msg to contain %s", err.Error(), test.errMsg) + } + if err := test.verify(test.into, test.apiVersion, test.kind, test.objName); err != nil { + t.Errorf("failed to verify decoding the raw: %v", err) + } + }) + } +} + +// verifyRuntimeObject checks if the runtime object (`into`) matches the given +// `apiVersion`, `kind`, and `name`. It returns an error if any field doesn't match. +func verifyRuntimeObject(into Interface, apiVersion, kind, name string) error { + if got := into.GetAPIVersion(); got != apiVersion { + return fmt.Errorf("expected API version '%s', got '%s'", apiVersion, got) + } + if got := into.GetKind(); got != kind { + return fmt.Errorf("expected kind '%s', got '%s'", kind, got) + } + if got := into.GetName(); got != name { + return fmt.Errorf("expected name '%s', got '%s'", name, got) + } + return nil +} diff --git a/pkg/webhook/interpreter/http_test.go b/pkg/webhook/interpreter/http_test.go new file mode 100644 index 000000000000..04443821b0da --- /dev/null +++ b/pkg/webhook/interpreter/http_test.go @@ -0,0 +1,346 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package interpreter + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/util/json" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +// HTTPMockHandler implements the Handler and DecoderInjector interfaces for testing. +type HTTPMockHandler struct { + response Response + decoder *Decoder +} + +// Handle implements the Handler interface for HTTPMockHandler. +func (m *HTTPMockHandler) Handle(_ context.Context, _ Request) Response { + return m.response +} + +// InjectDecoder implements the DecoderInjector interface by setting the decoder. +func (m *HTTPMockHandler) InjectDecoder(decoder *Decoder) { + m.decoder = decoder +} + +// mockBody simulates an error when reading the request body. +type mockBody struct{} + +func (m *mockBody) Read(_ []byte) (n int, err error) { + return 0, errors.New("mock read error") +} + +func (m *mockBody) Close() error { + return nil +} + +// limitedBadResponseWriter is a custom io.Writer implementation that simulates +// write errors for a specified number of attempts. After a certain number of failures, +// it allows the write operation to succeed. +type limitedBadResponseWriter struct { + failCount int + maxFailures int +} + +// Write simulates writing data to the writer. It forces an error response for +// a limited number of attempts, specified by maxFailures. Once failCount reaches +// maxFailures, it allows the write to succeed. +func (b *limitedBadResponseWriter) Write(p []byte) (n int, err error) { + if b.failCount < b.maxFailures { + b.failCount++ + return 0, errors.New("forced write error") + } + // After reaching maxFailures, allow the write to succeed to stop the infinite loop. + return len(p), nil +} + +func TestServeHTTP(t *testing.T) { + tests := []struct { + name string + req *http.Request + mockHandler *HTTPMockHandler + contentType string + res configv1alpha1.ResourceInterpreterContext + prep func(*http.Request, string) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "ServeHTTP_EmptyBody_RequestFailed", + req: httptest.NewRequest(http.MethodPost, "/", nil), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + req.Body = nil + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "request body is empty", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_InvalidContentType_ContentTypeIsInvalid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`{}`))), + mockHandler: &HTTPMockHandler{}, + contentType: "text/plain", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "contentType=text/plain, expected application/json", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_InvalidBodyJSON_JSONBodyIsInvalid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`invalid-json`))), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: 
&configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "json parse error", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_ReadBodyError_FailedToReadBody", + req: httptest.NewRequest(http.MethodPost, "/", &mockBody{}), + mockHandler: &HTTPMockHandler{}, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "", + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Message: "mock read error", + Code: http.StatusBadRequest, + }, + }, + }, + { + name: "ServeHTTP_ValidRequest_RequestIsValid", + req: httptest.NewRequest(http.MethodPost, "/", bytes.NewBuffer([]byte(`{}`))), + mockHandler: &HTTPMockHandler{ + response: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + }, + contentType: "application/json", + prep: func(req *http.Request, contentType string) error { + req.Header.Set("Content-Type", contentType) + requestBody := configv1alpha1.ResourceInterpreterContext{ + Request: &configv1alpha1.ResourceInterpreterRequest{ + UID: "test-uid", + }, + } + body, err := json.Marshal(requestBody) + if err != nil { + return fmt.Errorf("failed to marshal request body: %v", err) + } + req.Body = io.NopCloser(bytes.NewBuffer(body)) + return nil + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + recorder := httptest.NewRecorder() + if err := test.prep(test.req, test.contentType); err != nil { + t.Errorf("failed to prep serving http: %v", err) + } + webhook := NewWebhook(test.mockHandler, &Decoder{}) + webhook.ServeHTTP(recorder, test.req) + if err := verifyResourceInterpreterResponse(recorder.Body.Bytes(), test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +func TestWriteResponse(t *testing.T) { + tests := []struct { + name string + res Response + rec *httptest.ResponseRecorder + mockHandler *HTTPMockHandler + decoder *Decoder + verify func([]byte, *configv1alpha1.ResourceInterpreterResponse) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "WriteResponse_ValidValues_IsSucceeded", + res: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + rec: httptest.NewRecorder(), + mockHandler: &HTTPMockHandler{}, + decoder: &Decoder{}, + verify: verifyResourceInterpreterResponse, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + webhook := NewWebhook(test.mockHandler, test.decoder) + webhook.writeResponse(test.rec, test.res) + if err := test.verify(test.rec.Body.Bytes(), test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +func TestWriteResourceInterpreterResponse(t *testing.T) { + tests := []struct { + name string + 
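+		// rec is the writer the response is marshalled to; a failing writer
+		// exercises the retry path in writeResourceInterpreterResponse.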
mockHandler *HTTPMockHandler + rec io.Writer + res configv1alpha1.ResourceInterpreterContext + verify func(io.Writer, *configv1alpha1.ResourceInterpreterResponse) error + want *configv1alpha1.ResourceInterpreterResponse + }{ + { + name: "WriteResourceInterpreterResponse_ValidValues_WriteIsSuccessful", + mockHandler: &HTTPMockHandler{}, + rec: httptest.NewRecorder(), + res: configv1alpha1.ResourceInterpreterContext{ + Response: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{Code: http.StatusOK}, + }, + }, + verify: func(writer io.Writer, rir *configv1alpha1.ResourceInterpreterResponse) error { + data, ok := writer.(*httptest.ResponseRecorder) + if !ok { + return fmt.Errorf("expected writer of type httptest.ResponseRecorder but got %T", writer) + } + return verifyResourceInterpreterResponse(data.Body.Bytes(), rir) + }, + want: &configv1alpha1.ResourceInterpreterResponse{ + UID: "test-uid", + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Message: "", + Code: http.StatusOK, + }, + }, + }, + { + name: "WriteResourceInterpreterResponse_FailedToWrite_WriterReachedMaxFailures", + mockHandler: &HTTPMockHandler{}, + res: configv1alpha1.ResourceInterpreterContext{ + Response: &configv1alpha1.ResourceInterpreterResponse{}, + }, + rec: &limitedBadResponseWriter{maxFailures: 3}, + verify: func(writer io.Writer, _ *configv1alpha1.ResourceInterpreterResponse) error { + data, ok := writer.(*limitedBadResponseWriter) + if !ok { + return fmt.Errorf("expected writer of type limitedBadResponseWriter but got %T", writer) + } + if data.failCount != data.maxFailures { + return fmt.Errorf("expected %d write failures, got %d", data.maxFailures, data.failCount) + } + return nil + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + webhook := NewWebhook(test.mockHandler, &Decoder{}) + webhook.writeResourceInterpreterResponse(test.rec, test.res) + if err := test.verify(test.rec, test.want); err != nil { + t.Errorf("failed to verify resource interpreter response: %v", err) + } + }) + } +} + +// verifyResourceInterpreterResponse unmarshals the provided body into a +// ResourceInterpreterContext and verifies it matches the expected values in res2. +func verifyResourceInterpreterResponse(body []byte, res2 *configv1alpha1.ResourceInterpreterResponse) error { + var resContext configv1alpha1.ResourceInterpreterContext + if err := json.Unmarshal(body, &resContext); err != nil { + return fmt.Errorf("failed to unmarshal body: %v", err) + } + if resContext.Response.UID != res2.UID { + return fmt.Errorf("expected UID %s, but got %s", res2.UID, resContext.Response.UID) + } + if resContext.Response.Successful != res2.Successful { + return fmt.Errorf("expected success status %t, but got %t", res2.Successful, resContext.Response.Successful) + } + if !strings.Contains(resContext.Response.Status.Message, res2.Status.Message) { + return fmt.Errorf("expected message %s to be subset, but got %s", res2.Status.Message, resContext.Response.Status.Message) + } + if resContext.Response.Status.Code != res2.Status.Code { + return fmt.Errorf("expected status code %d, but got %d", res2.Status.Code, resContext.Response.Status.Code) + } + return nil +} diff --git a/pkg/webhook/interpreter/inject_test.go b/pkg/webhook/interpreter/inject_test.go new file mode 100644 index 000000000000..c61d30cf00db --- /dev/null +++ b/pkg/webhook/interpreter/inject_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "testing" +) + +// MockDecoderInjector is a mock struct implementing the DecoderInjector interface for testing purposes. +type MockDecoderInjector struct { + decoder *Decoder +} + +// InjectDecoder implements the DecoderInjector interface by setting the decoder. +func (m *MockDecoderInjector) InjectDecoder(decoder *Decoder) { + m.decoder = decoder +} + +func TestInjectDecoder(t *testing.T) { + tests := []struct { + name string + mockInjector interface{} + decoder *Decoder + wantToBeInjected bool + }{ + { + name: "InjectDecoder_ObjectImplementsDecoderInjector_Injected", + mockInjector: &MockDecoderInjector{}, + decoder: &Decoder{}, + wantToBeInjected: true, + }, + { + name: "InjectDecoder_ObjectNotImplementDecoderInjector_NotInjected", + mockInjector: struct{}{}, + decoder: &Decoder{}, + wantToBeInjected: false, + }, + { + name: "InjectDecoder_ObjectImplementsDecoderInjector_Injected", + mockInjector: &MockDecoderInjector{}, + wantToBeInjected: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got := InjectDecoderInto(test.decoder, test.mockInjector); got != test.wantToBeInjected { + t.Errorf("expected status injection to be %t, but got %t", test.wantToBeInjected, got) + } + if test.wantToBeInjected && test.mockInjector.(*MockDecoderInjector).decoder != test.decoder { + t.Errorf("failed to inject the correct decoder, expected %v but got %v", test.decoder, test.mockInjector.(*MockDecoderInjector).decoder) + } + }) + } +} diff --git a/pkg/webhook/interpreter/response_test.go b/pkg/webhook/interpreter/response_test.go new file mode 100644 index 000000000000..8a5aeebf979f --- /dev/null +++ b/pkg/webhook/interpreter/response_test.go @@ -0,0 +1,179 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interpreter + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + + "gomodules.xyz/jsonpatch/v2" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +// customError is a helper struct for simulating errors. 
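+// It implements the standard error interface so tests can hand crafted
+// failures to helpers such as Errored.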
+type customError struct { + msg string +} + +func (e *customError) Error() string { + return e.msg +} + +func TestErrored(t *testing.T) { + err := &customError{"Test Error"} + code := int32(500) + response := Errored(code, err) + + expectedResponse := Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Code: code, + Message: err.Error(), + }, + }, + } + + if !reflect.DeepEqual(expectedResponse, response) { + t.Errorf("response mismatch: expected %v, got %v", expectedResponse, response) + } +} + +func TestSucceeded(t *testing.T) { + message := "Operation succeeded" + response := Succeeded(message) + + expectedResponse := ValidationResponse(true, message) + + if !reflect.DeepEqual(expectedResponse, response) { + t.Errorf("response mismatch: expected %v, got %v", expectedResponse, response) + } +} + +func TestValidationResponse(t *testing.T) { + tests := []struct { + name string + msg string + isSuccessful bool + want Response + }{ + { + name: "ValidationResponse_IsSuccessful_Succeeded", + msg: "Success", + isSuccessful: true, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + Status: &configv1alpha1.RequestStatus{ + Code: int32(http.StatusOK), + Message: "Success", + }, + }, + }, + }, + { + name: "ValidationResponse_IsFailed_Failed", + msg: "Failed", + isSuccessful: false, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: false, + Status: &configv1alpha1.RequestStatus{ + Code: int32(http.StatusForbidden), + Message: "Failed", + }, + }, + }, + }, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + response := ValidationResponse(test.isSuccessful, test.msg) + if !reflect.DeepEqual(response, test.want) { + t.Errorf("expected response %v but got response %v", test.want, response) + } + }) + } +} + +func TestPatchResponseFromRaw(t *testing.T) { + tests := []struct { + name string + original, current []byte + expectedPatch []jsonpatch.Operation + res Response + want Response + prep func(wantRes *Response) error + }{ + { + name: "PatchResponseFromRaw_ReplacePatch_ReplacePatchExpected", + original: []byte(fmt.Sprintf(`{"name": "%s"}`, "original")), + current: []byte(fmt.Sprintf(`{"name": "%s"}`, "current")), + prep: func(wantRes *Response) error { + expectedPatch := []jsonpatch.Operation{ + { + Operation: "replace", + Path: "/name", + Value: "current", + }, + } + expectedPatchJSON, err := json.Marshal(expectedPatch) + if err != nil { + return fmt.Errorf("marshal failure: %v", err) + } + wantRes.ResourceInterpreterResponse.Patch = expectedPatchJSON + + return nil + }, + want: Response{ + ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{ + Successful: true, + PatchType: func() *configv1alpha1.PatchType { pt := configv1alpha1.PatchTypeJSONPatch; return &pt }(), + }, + }, + }, + { + name: "PatchResponseFromRaw_OriginalSameAsCurrentValue_NoPatchExpected", + original: []byte(fmt.Sprintf(`{"name": "%s"}`, "same")), + current: []byte(fmt.Sprintf(`{"name": "%s"}`, "same")), + prep: func(*Response) error { return nil }, + want: Succeeded(""), + }, + { + name: "PatchResponseFromRaw_InvalidJSONDocument_JSONDocumentIsInvalid", + original: []byte(nil), + current: []byte("updated"), + prep: func(*Response) error { return nil }, + want: Errored(http.StatusInternalServerError, &customError{"invalid JSON Document"}), + }, + } + for _, test := range tests 
{
+		t.Run(test.name, func(t *testing.T) {
+			if err := test.prep(&test.want); err != nil {
+				t.Errorf("failed to prep for patching response from raw: %v", err)
+			}
+			patchResponse := PatchResponseFromRaw(test.original, test.current)
+			if !reflect.DeepEqual(patchResponse, test.want) {
+				t.Errorf("patch responses did not match; expected %v, but got %v", test.want, patchResponse)
+			}
+		})
+	}
+}
diff --git a/pkg/webhook/interpreter/webhook_test.go b/pkg/webhook/interpreter/webhook_test.go
new file mode 100644
index 000000000000..5b5e7e6712f1
--- /dev/null
+++ b/pkg/webhook/interpreter/webhook_test.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2024 The Karmada Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package interpreter
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"reflect"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/types"
+
+	configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1"
+)
+
+// WebhookMockHandler implements the Handler and DecoderInjector interfaces for testing.
+type WebhookMockHandler struct {
+	response Response
+	decoder  *Decoder
+}
+
+// Handle implements the Handler interface for WebhookMockHandler.
+func (m *WebhookMockHandler) Handle(_ context.Context, _ Request) Response {
+	return m.response
+}
+
+// InjectDecoder implements the DecoderInjector interface by setting the decoder.
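+// The decoder is retained on the mock so the injection can be observed.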
+func (m *WebhookMockHandler) InjectDecoder(decoder *Decoder) {
+	m.decoder = decoder
+}
+
+func TestNewWebhook(t *testing.T) {
+	mockHandler := &WebhookMockHandler{}
+	decoder := &Decoder{}
+
+	webhook := NewWebhook(mockHandler, decoder)
+	if webhook == nil {
+		t.Fatalf("webhook returned by NewWebhook() is nil")
+	}
+	if webhook.handler != mockHandler {
+		t.Errorf("webhook has incorrect handler: expected %v, got %v", mockHandler, webhook.handler)
+	}
+}
+
+func TestHandle(t *testing.T) {
+	var uid types.UID = "test-uid"
+
+	expectedResponse := Response{
+		ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{
+			Successful: true,
+			Status: &configv1alpha1.RequestStatus{
+				Code: http.StatusOK,
+			},
+			UID: uid,
+		},
+	}
+
+	mockHandler := &WebhookMockHandler{response: expectedResponse}
+	webhook := NewWebhook(mockHandler, &Decoder{})
+	req := Request{
+		ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{
+			UID: uid,
+		},
+	}
+
+	resp := webhook.Handle(context.TODO(), req)
+	if !reflect.DeepEqual(resp, expectedResponse) {
+		t.Errorf("response mismatch in Handle(): expected %v, got %v", expectedResponse, resp)
+	}
+	if resp.UID != req.UID {
+		t.Errorf("uid was not set as expected: expected %v, got %v", req.UID, resp.UID)
+	}
+}
+
+func TestComplete(t *testing.T) {
+	tests := []struct {
+		name   string
+		req    Request
+		res    Response
+		verify func(*Response, *Request) error
+	}{
+		{
+			name: "TestComplete_StatusAndStatusCodeAreUnset_FieldsArePopulated",
+			req: Request{
+				ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{
+					UID: "test-uid",
+				},
+			},
+			res:    Response{},
+			verify: verifyResourceInterpreterCompleteResponse,
+		},
+		{
+			name: "TestComplete_PresetResponseUIDAndStatusCode_UIDOverriddenStatusCodeKept",
+			req: Request{
+				ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{
+					UID: "test-uid",
+				},
+			},
+			res: Response{
+				ResourceInterpreterResponse: configv1alpha1.ResourceInterpreterResponse{
+					UID: "existing-uid",
+					Status: &configv1alpha1.RequestStatus{
+						Code: http.StatusForbidden,
+					},
+				},
+			},
+			verify: func(resp *Response, req *Request) error {
+				if resp.UID != req.UID {
+					return fmt.Errorf("uid should be overridden even if the response already has one: expected %v, got %v", req.UID, resp.UID)
+				}
+				if resp.Status.Code != http.StatusForbidden {
+					return fmt.Errorf("status code should not be overridden if it's already set: expected %v, got %v", http.StatusForbidden, resp.Status.Code)
+				}
+				return nil
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			test.res.Complete(test.req)
+			if err := test.verify(&test.res, &test.req); err != nil {
+				t.Errorf("failed to verify complete resource interpreter response: %v", err)
+			}
+		})
+	}
+}
+
+// verifyResourceInterpreterCompleteResponse checks if the response from
+// the resource interpreter's Complete method is valid.
+// It ensures the response UID matches the request UID, the Status is initialized,
+// and the Status code is set to http.StatusOK. Returns an error if any check fails.
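+// For reference, the asserted behavior amounts to the following (a sketch
+// inferred from these tests, not an excerpt from the implementation):
+//
+//	res := Response{}
+//	res.Complete(Request{ResourceInterpreterRequest: configv1alpha1.ResourceInterpreterRequest{UID: "uid"}})
+//	// afterwards res.UID == "uid" and res.Status.Code == http.StatusOK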
+func verifyResourceInterpreterCompleteResponse(res *Response, req *Request) error { + if res.UID != req.UID { + return fmt.Errorf("uid was not set as expected: expected %v, got %v", req.UID, res.UID) + } + if res.Status == nil { + return fmt.Errorf("status should be initialized if it's nil") + } + if res.Status.Code != http.StatusOK { + return fmt.Errorf("status code should be set to %v if it was 0, got %v", http.StatusOK, res.Status.Code) + } + return nil +} diff --git a/pkg/webhook/multiclusteringress/validating_test.go b/pkg/webhook/multiclusteringress/validating_test.go new file mode 100644 index 000000000000..91b21ecdd910 --- /dev/null +++ b/pkg/webhook/multiclusteringress/validating_test.go @@ -0,0 +1,448 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusteringress + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + networkingv1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
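+// A nil raw object is reported as an error so tests can exercise the
+// old-object decode failure path during updates.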
+func (f *fakeValidationDecoder) DecodeRaw(rawObject runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if rawObject.Object != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(rawObject.Object).Elem()) + return nil + } + return errors.New("decode raw object error; object is nil") +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_DecodeOldObjectError_DeniesAdmission", + decoder: &fakeValidationDecoder{}, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: nil, + }, + }, + }, + want: TestResponse{ + Type: Errored, + Message: "decode raw object error; object is nil", + }, + }, + { + name: "Handle_UpdateMCIWithInvalidSpec_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-new-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "10.0.0.5"}, + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "127.0.0.1"}, + }, + }, + }, + }, + }, + }, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"10.0.0.5\": must be a DNS name, not an IP address", + }, + }, + { + name: "Handle_CreateMCIWithInvalidSpec_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: nil, + Rules: nil, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: TestResponse{ + Type: Denied, + Message: "either `defaultBackend` or `rules` must be specified", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-new-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: 
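+							// Hosts must be DNS names here; an IP would be rejected by validation.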
[]networkingv1.IngressRule{ + {Host: "test-new-backend.com"}, + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "test-backend.com"}, + }, + }, + }, + }, + }, + }, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +func TestValidatingSpec_validateMCIUpdate(t *testing.T) { + tests := []struct { + name string + oldMcs *networkingv1alpha1.MultiClusterIngress + newMcs *networkingv1alpha1.MultiClusterIngress + wantErr bool + errMsg string + }{ + { + name: "validateMCIUpdate_ValidMetadataUpdate_NoError", + oldMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + Labels: map[string]string{"key": "oldValue"}, + ResourceVersion: "1000", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "test-backend.com"}, + }, + }, + }, + newMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + Labels: map[string]string{"key": "oldValue"}, + ResourceVersion: "1001", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-new-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "test-new-backend.com"}, + }, + }, + }, + wantErr: false, + }, + { + name: "validateMCIUpdate_InvalidMetadataUpdate_Error", + oldMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + }, + newMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "mutated-name", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + }, + wantErr: true, + errMsg: "metadata.name: Invalid value: \"mutated-name\"", + }, + { + name: "validateMCSUpdate_InvalidIngressLoadBalancerStatus_Error", + oldMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: 
&networkingv1.IngressServiceBackend{ + Name: "test-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "test-backend.com"}, + }, + }, + Status: networkingv1alpha1.MultiClusterIngressStatus{ + IngressStatus: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {IP: "127.0.0.1", Hostname: "test-backend.com"}, + }, + }, + }, + }, + }, + newMcs: &networkingv1alpha1.MultiClusterIngress{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mci", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1.IngressSpec{ + DefaultBackend: &networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: "test-new-backend", + Port: networkingv1.ServiceBackendPort{ + Name: "", + Number: 80, + }, + }, + }, + Rules: []networkingv1.IngressRule{ + {Host: "test-new-backend.com"}, + }, + }, + Status: networkingv1alpha1.MultiClusterIngressStatus{ + IngressStatus: networkingv1.IngressStatus{ + LoadBalancer: networkingv1.IngressLoadBalancerStatus{ + Ingress: []networkingv1.IngressLoadBalancerIngress{ + {IP: "test-new-backend.com", Hostname: "test-new-backend.com"}, + }, + }, + }, + }, + }, + wantErr: true, + errMsg: "Invalid value: \"test-new-backend.com\": must be a valid IP address", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + errs := validateMCIUpdate(tt.oldMcs, tt.newMcs) + if (len(errs) > 0) != tt.wantErr { + t.Errorf("validateMCSUpdate() gotErr = %v, wantErr %v", len(errs) > 0, tt.wantErr) + } + if tt.wantErr && !strings.Contains(errs.ToAggregate().Error(), tt.errMsg) { + t.Errorf("Expected error message: %v, got: %v", tt.errMsg, errs.ToAggregate().Error()) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. +func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/multiclusterservice/mutating.go b/pkg/webhook/multiclusterservice/mutating.go index 14784192c3dd..e2ea648ffc60 100644 --- a/pkg/webhook/multiclusterservice/mutating.go +++ b/pkg/webhook/multiclusterservice/mutating.go @@ -22,6 +22,7 @@ import ( "net/http" "github.com/google/uuid" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" @@ -44,6 +45,7 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm if err != nil { return admission.Errored(http.StatusBadRequest, err) } + klog.V(2).Infof("Mutating MultiClusterService(%s/%s) for request: %s", req.Namespace, mcs.Name, req.Operation) if util.GetLabelValue(mcs.Labels, networkingv1alpha1.MultiClusterServicePermanentIDLabel) == "" { id := uuid.New().String() diff --git a/pkg/webhook/multiclusterservice/mutating_test.go b/pkg/webhook/multiclusterservice/mutating_test.go new file mode 100644 index 000000000000..a4edf66652cd --- /dev/null +++ b/pkg/webhook/multiclusterservice/mutating_test.go @@ -0,0 +1,204 @@ +/* +Copyright 2024 The Karmada Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multiclusterservice + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the multi-cluster service (mcs) name and namespace to be used in the test. + name := "test-mcs" + namespace := "test-namespace" + + // Mock a request with a specific namespace. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Name: name, + Namespace: namespace, + }, + } + + // Create the initial mcs with default values for testing. + mcsObj := &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + ResourceVersion: "1001", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Ports: []networkingv1alpha1.ExposurePort{ + { + Name: "foo", + Port: 16312, + }, + { + Name: "bar", + Port: 16313, + }, + }, + ProviderClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + } + + // Define the expected mcs object after mutations. 
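+	// It differs from the input only by the injected permanent-ID label.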
+ wantMCSObj := &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + ResourceVersion: "1001", + Labels: map[string]string{ + networkingv1alpha1.MultiClusterServicePermanentIDLabel: "some-unique-id", + }, + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Ports: []networkingv1alpha1.ExposurePort{ + { + Name: "foo", + Port: 16312, + }, + { + Name: "bar", + Port: 16313, + }, + }, + ProviderClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + } + + // Mock decoder that decodes the request into the mcs object. + decoder := &fakeMutationDecoder{ + obj: mcsObj, + } + + // Marshal the expected policy to simulate the final mutated object. + wantBytes, err := json.Marshal(wantMCSObj) + if err != nil { + t.Fatalf("Failed to marshal expected policy: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Verify that the only patch applied is for the UUID label. If any other patches are present, it indicates that the mcs object was not handled as expected. + if len(got.Patches) > 0 { + firstPatch := got.Patches[0] + if firstPatch.Operation != "replace" || firstPatch.Path != "/metadata/labels/multiclusterservice.karmada.io~1permanent-id" { + t.Errorf("Handle() returned unexpected patches. Only the UUID patch was expected. Received patches: %v", got.Patches) + } + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/multiclusterservice/validating_test.go b/pkg/webhook/multiclusterservice/validating_test.go old mode 100755 new mode 100644 index 3f722c6c9091..4519ca9b24ff --- a/pkg/webhook/multiclusterservice/validating_test.go +++ b/pkg/webhook/multiclusterservice/validating_test.go @@ -17,15 +17,281 @@ limitations under the License. package multiclusterservice import ( + "context" + "errors" + "net/http" "reflect" + "strings" "testing" + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1" ) +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
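+// Unlike the multiclusteringress fake, a nil raw object is tolerated here;
+// decode failures are driven through the err field instead.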
+func (f *fakeValidationDecoder) DecodeRaw(rawObject runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if rawObject.Object != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(rawObject.Object).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_DecodeOldObjectError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode raw error"), + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: nil, + }, + }, + }, + want: TestResponse{ + Type: Errored, + Message: "decode raw error", + }, + }, + { + name: "Handle_UpdateMCSWithInvalidSpec_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Ports: []networkingv1alpha1.ExposurePort{ + { + Name: "foo.withdot", + Port: 16312, + }, + { + Name: "bar", + Port: 16313, + }, + }, + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + ProviderClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + }, + }, + }, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"foo.withdot\": must not contain dots", + }, + }, + { + name: "Handle_CreateMCSWithInvalidSpec_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Ports: []networkingv1alpha1.ExposurePort{ + { + Name: "foo.withdot", + Port: 16312, + }, + { + Name: "bar", + Port: 16313, + }, + }, + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + ProviderClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"foo.withdot\": must not contain dots", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &networkingv1alpha1.MultiClusterService{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Ports: []networkingv1alpha1.ExposurePort{ + { + Name: "foo", + Port: 16312, + }, + { + Name: "bar", + Port: 16313, + }, + }, + ProviderClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + ConsumerClusters: []networkingv1alpha1.ClusterSelector{ + {Name: "member1"}, + {Name: "member2"}, + }, + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + }, + }, + }, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + func TestValidateMultiClusterServiceSpec(t *testing.T) { validator := &ValidatingAdmission{} specFld := field.NewPath("spec") @@ -191,12 +457,12 @@ func TestValidateMultiClusterServiceSpec(t *testing.T) { networkingv1alpha1.ExposureTypeCrossCluster, }, ProviderClusters: []networkingv1alpha1.ClusterSelector{ - {Name: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}, + {Name: strings.Repeat("a", 49)}, }, ConsumerClusters: []networkingv1alpha1.ClusterSelector{}, }, }, - expectedErr: field.ErrorList{field.Invalid(specFld.Child("range").Child("providerClusters").Index(0), "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", "must be no more than 48 characters")}, + expectedErr: field.ErrorList{field.Invalid(specFld.Child("range").Child("providerClusters").Index(0), strings.Repeat("a", 49), "must be no more than 48 characters")}, }, } for _, tt := range tests { @@ -207,3 +473,152 @@ func TestValidateMultiClusterServiceSpec(t *testing.T) { }) } } + +func TestValidatingSpec_validateMCSUpdate(t *testing.T) { + tests := []struct { + name string + oldMcs *networkingv1alpha1.MultiClusterService + newMcs *networkingv1alpha1.MultiClusterService + wantErr bool + errMsg string + }{ + { + name: "validateMCSUpdate_ValidMetadataUpdate_NoError", + oldMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + Labels: map[string]string{"key": "oldValue"}, + ResourceVersion: "1000", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + newMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + Labels: map[string]string{"key": "newValue"}, + ResourceVersion: "1001", + }, + Spec: 
networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + wantErr: false, + }, + { + name: "validateMCSUpdate_InvalidMetadataUpdate_Error", + oldMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + }, + newMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-name", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + }, + wantErr: true, + errMsg: "metadata.name: Invalid value: \"invalid-name\"", + }, + { + name: "validateMCSUpdate_InvalidTypesUpdate_Error", + oldMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeLoadBalancer, + }, + }, + }, + newMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Spec: networkingv1alpha1.MultiClusterServiceSpec{ + Types: []networkingv1alpha1.ExposureType{ + networkingv1alpha1.ExposureTypeCrossCluster, + }, + }, + }, + wantErr: true, + errMsg: "MultiClusterService types are immutable", + }, + { + name: "validateMCSUpdate_InvalidLoadBalancerStatus_Error", + oldMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1000", + }, + }, + newMcs: &networkingv1alpha1.MultiClusterService{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-mcs", + Namespace: "test-namespace", + ResourceVersion: "1001", + }, + Status: corev1.ServiceStatus{ + LoadBalancer: corev1.LoadBalancerStatus{ + Ingress: []corev1.LoadBalancerIngress{ + {IP: "invalid IP"}, + }, + }, + }, + }, + wantErr: true, + errMsg: "Invalid value: \"invalid IP\": must be a valid IP address", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{} + errs := v.validateMCSUpdate(tt.oldMcs, tt.newMcs) + if (len(errs) > 0) != tt.wantErr { + t.Errorf("validateMCSUpdate() gotErr = %v, wantErr %v", len(errs) > 0, tt.wantErr) + } + if tt.wantErr && !strings.Contains(errs.ToAggregate().Error(), tt.errMsg) { + t.Errorf("Expected error message: %v, got: %v", tt.errMsg, errs.ToAggregate().Error()) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. 
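+// Allowed responses carry no message, so the empty string is returned for them.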
+func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/overridepolicy/mutating.go b/pkg/webhook/overridepolicy/mutating.go index 592b5252d02b..95a1eba3eeb4 100644 --- a/pkg/webhook/overridepolicy/mutating.go +++ b/pkg/webhook/overridepolicy/mutating.go @@ -43,6 +43,7 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm if err != nil { return admission.Errored(http.StatusBadRequest, err) } + klog.V(2).Infof("Mutating OverridePolicy(%s/%s) for request: %s", req.Namespace, policy.Name, req.Operation) // Set default namespace for all resource selector if not set. // We need to get the default namespace from the request because for kube-apiserver < v1.24, diff --git a/pkg/webhook/overridepolicy/mutating_test.go b/pkg/webhook/overridepolicy/mutating_test.go new file mode 100644 index 000000000000..629ea0f40549 --- /dev/null +++ b/pkg/webhook/overridepolicy/mutating_test.go @@ -0,0 +1,160 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package overridepolicy + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the op name and namespace to be used in the test. 
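+	// ("op" is shorthand for the OverridePolicy under test.)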
+ policyName := "test-override-policy" + namespace := "test-namespace" + + // Mock a request with a specific namespace. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespace, + Operation: admissionv1.Create, + }, + } + + // Create the initial op with default values for testing. + op := &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {Namespace: ""}, + {Namespace: ""}, + }, + }, + } + + // Define the expected op after mutations. + wantOP := &policyv1alpha1.OverridePolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {Namespace: namespace}, + {Namespace: namespace}, + }, + }, + } + + // Mock decoder that decodes the request into the policy object. + decoder := &fakeMutationDecoder{ + obj: op, + } + + // Marshal the expected op to simulate the final mutated object. + wantBytes, err := json.Marshal(wantOP) + if err != nil { + t.Fatalf("Failed to marshal expected override policy: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if there are any patches applied. There should be no patches if the override policy is handled correctly. + if len(got.Patches) > 0 { + t.Errorf("Handle() returned patches, but no patches were expected. Got patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/overridepolicy/validating_test.go b/pkg/webhook/overridepolicy/validating_test.go new file mode 100644 index 000000000000..43a9c6bc3c48 --- /dev/null +++ b/pkg/webhook/overridepolicy/validating_test.go @@ -0,0 +1,432 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package overridepolicy + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. 
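+// A non-nil err short-circuits decoding; otherwise the prepared object is
+// copied into obj via reflection.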
+func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_ValidationOverrideSpecFails_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + LabelsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ + { + Operator: policyv1alpha1.OverriderOpAdd, + Value: map[string]string{"testannotation~projectId": "c-m-lfx9lk92p-v86cf"}, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "Invalid value: \"testannotation~projectId\"", + }, + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + Plaintext: []policyv1alpha1.PlaintextOverrider{ + { + Path: "/spec/optional", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + { + name: "Handle_FieldOverrider_ContainsBothYAMLAndJSON_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + {APIVersion: "test-apiversion", Kind: "test"}, + }, + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + TargetCluster: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{"member1"}, + }, + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + 
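+				// Message matching is substring-based, so only the stable part is listed.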
Message: "FieldOverrider has both YAML and JSON set. Only one is allowed", + }, + }, + { + name: "Handle_InvalidFieldPathInYAML_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "invalidPath", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].fieldPath: Invalid value: \"invalidPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidJSONSubPath_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "invalidSubPath", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].subPath: Invalid value: \"invalidSubPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidYAMLSubPath_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "invalidSubPath", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte("db: new")}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].yaml[0].subPath: Invalid value: \"invalidSubPath\": JSON pointer must be empty or start with a \"/", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpReplace", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpReplace, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8(nil)}: value is required for add or replace operation", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpAdd", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + 
Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpAdd, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8(nil)}: value is required for add or replace operation", + }, + }, + { + name: "Handle_InvalidJSONValue_DeniesAdmission_OverriderOpRemove", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.OverridePolicy{ + Spec: policyv1alpha1.OverrideSpec{ + OverrideRules: []policyv1alpha1.RuleWithCluster{ + { + Overriders: policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/config", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/db-config", + Operator: policyv1alpha1.OverriderOpRemove, + Value: apiextensionsv1.JSON{Raw: []byte(`{"db": "new"}`)}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "spec.overrideRules[0].overriders.fieldOverrider[0].json[0].value: Invalid value: v1.JSON{Raw:[]uint8{0x7b, 0x22, 0x64, 0x62, 0x22, 0x3a, 0x20, 0x22, 0x6e, 0x65, 0x77, 0x22, 0x7d}}: value is not allowed for remove operation", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. +func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/propagationpolicy/mutating.go b/pkg/webhook/propagationpolicy/mutating.go index 98d8185dd6ee..9393ebfb39ca 100644 --- a/pkg/webhook/propagationpolicy/mutating.go +++ b/pkg/webhook/propagationpolicy/mutating.go @@ -19,7 +19,6 @@ package propagationpolicy import ( "context" "encoding/json" - "fmt" "net/http" "github.com/google/uuid" @@ -31,7 +30,6 @@ import ( policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/helper" - "github.com/karmada-io/karmada/pkg/util/validation" ) // MutatingAdmission mutates API request if necessary. 
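For orientation, the extractResponseType/extractErrorMessage helpers in validating_test.go above classify controller-runtime admission responses by looking only at Allowed and Result.Code. A self-contained sketch of why that works, assuming controller-runtime's current constructors (admission.Errored stores the given HTTP code in Result.Code, while admission.Denied produces a 403 Forbidden status):

```go
package main

import (
	"errors"
	"fmt"
	"net/http"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// classify mirrors the test helpers: Allowed responses are "Allowed",
// a 400 status marks an Errored response, anything else is "Denied".
func classify(resp admission.Response) string {
	if resp.Allowed {
		return "Allowed"
	}
	if resp.Result != nil && resp.Result.Code == http.StatusBadRequest {
		return "Errored"
	}
	return "Denied"
}

func main() {
	fmt.Println(classify(admission.Errored(http.StatusBadRequest, errors.New("decode error")))) // Errored
	fmt.Println(classify(admission.Denied("invalid value")))                                    // Denied
	fmt.Println(classify(admission.Allowed("")))                                                // Allowed
}
```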
@@ -75,9 +73,6 @@ func (a *MutatingAdmission) Handle(_ context.Context, req admission.Request) adm } } - if len(policy.Name) > validation.LabelValueMaxLength { - return admission.Errored(http.StatusBadRequest, fmt.Errorf("PropagationPolicy's name should be no more than %d characters", validation.LabelValueMaxLength)) - } // Set default spread constraints if both 'SpreadByField' and 'SpreadByLabel' not set. helper.SetDefaultSpreadConstraints(policy.Spec.Placement.SpreadConstraints) helper.AddTolerations(&policy.Spec.Placement, helper.NewNotReadyToleration(a.DefaultNotReadyTolerationSeconds), diff --git a/pkg/webhook/propagationpolicy/mutating_test.go b/pkg/webhook/propagationpolicy/mutating_test.go new file mode 100644 index 000000000000..ef1d0c8f8581 --- /dev/null +++ b/pkg/webhook/propagationpolicy/mutating_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package propagationpolicy + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/util" + "github.com/karmada-io/karmada/pkg/util/helper" +) + +var ( + notReadyTolerationSeconds int64 = 300 + unreachableTolerationSeconds int64 = 300 + failOverGracePeriodSeconds int32 = 600 +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. 
+func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := NewMutatingHandler( + notReadyTolerationSeconds, unreachableTolerationSeconds, tt.decoder, + ) + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the pp name and namespace to be used in the test. + policyName := "test-propagation-policy" + namespace := "test-namespace" + + // Mock a request with a specific namespace. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespace, + Operation: admissionv1.Create, + }, + } + + // Create the initial pp with default values for testing. + pp := &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + }, + Spec: policyv1alpha1.PropagationSpec{ + Placement: policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + {SpreadByLabel: "", SpreadByField: "", MinGroups: 0}, + }, + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + }, + }, + PropagateDeps: false, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + Namespace: "", + Kind: util.ServiceImportKind, + APIVersion: mcsv1alpha1.GroupVersion.String(), + }, + {Namespace: ""}, + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{ + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: nil, + }, + }, + }, + } + + // Define the expected pp after mutations. 
+ wantPP := &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: policyName, + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "some-unique-uuid", + }, + Finalizers: []string{util.PropagationPolicyControllerFinalizer}, + }, + Spec: policyv1alpha1.PropagationSpec{ + Placement: policyv1alpha1.Placement{ + SpreadConstraints: []policyv1alpha1.SpreadConstraint{ + { + SpreadByField: policyv1alpha1.SpreadByFieldCluster, + MinGroups: 1, + }, + }, + ReplicaScheduling: &policyv1alpha1.ReplicaSchedulingStrategy{ + ReplicaSchedulingType: policyv1alpha1.ReplicaSchedulingTypeDivided, + ReplicaDivisionPreference: policyv1alpha1.ReplicaDivisionPreferenceWeighted, + }, + ClusterTolerations: []corev1.Toleration{ + *helper.NewNotReadyToleration(notReadyTolerationSeconds), + *helper.NewUnreachableToleration(unreachableTolerationSeconds), + }, + }, + PropagateDeps: true, + ResourceSelectors: []policyv1alpha1.ResourceSelector{ + { + Namespace: namespace, + Kind: util.ServiceImportKind, + APIVersion: mcsv1alpha1.GroupVersion.String(), + }, + {Namespace: namespace}, + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{ + PurgeMode: policyv1alpha1.Graciously, + GracePeriodSeconds: ptr.To[int32](failOverGracePeriodSeconds), + }, + }, + }, + } + + // Mock decoder that decodes the request into the pp object. + decoder := &fakeMutationDecoder{ + obj: pp, + } + + // Marshal the expected pp to simulate the final mutated object. + wantBytes, err := json.Marshal(wantPP) + if err != nil { + t.Fatalf("Failed to marshal expected propagation policy: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := NewMutatingHandler( + notReadyTolerationSeconds, unreachableTolerationSeconds, decoder, + ) + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if exactly one patch is applied. + if len(got.Patches) != 1 { + t.Errorf("Handle() returned an unexpected number of patches. Expected one patch, received: %v", got.Patches) + } + + // Verify that the only patch applied is for the UUID label + // If any other patches are present, it indicates that the propagation policy was not handled as expected. + firstPatch := got.Patches[0] + if firstPatch.Operation != "replace" || firstPatch.Path != "/metadata/labels/propagationpolicy.karmada.io~1permanent-id" { + t.Errorf("Handle() returned unexpected patches. Only the UUID patch was expected. Received patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/propagationpolicy/validating_test.go b/pkg/webhook/propagationpolicy/validating_test.go new file mode 100644 index 000000000000..192650ee3f0b --- /dev/null +++ b/pkg/webhook/propagationpolicy/validating_test.go @@ -0,0 +1,180 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package propagationpolicy + +import ( + "context" + "errors" + "fmt" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + "github.com/karmada-io/karmada/pkg/scheduler" +) + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(rawObject runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if rawObject.Object != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(rawObject.Object).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + { + name: "Handle_SchedulerNameUpdated_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.PropagationPolicy{ + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: "new-scheduler", + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &policyv1alpha1.PropagationPolicy{ + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: scheduler.DefaultScheduler, + }, + }, + }, + }, + }, + want: admission.Denied("the schedulerName should not be updated"), + }, + { + name: "Handle_PermanentIDLabelUpdated_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "new-id", + }, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Update, + OldObject: runtime.RawExtension{ + Object: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "old-id", + }, + }, + }, + }, + }, + }, + want: admission.Denied(fmt.Sprintf("label %s is immutable, it can only be set by the system "+ + "during creation", policyv1alpha1.PropagationPolicyPermanentIDLabel)), + }, + { + name: "Handle_PermanentIDLabelMissing_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{}, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: admission.Denied(fmt.Sprintf("label %s is required, it should be set by the mutating "+ + "admission webhook during creation", policyv1alpha1.PropagationPolicyPermanentIDLabel)), + }, + { + name: "Handle_ValidationSucceeds_AllowsAdmission", + decoder: &fakeValidationDecoder{ 
+ obj: &policyv1alpha1.PropagationPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + policyv1alpha1.PropagationPolicyPermanentIDLabel: "new-id", + }, + }, + Spec: policyv1alpha1.PropagationSpec{ + SchedulerName: scheduler.DefaultScheduler, + }, + }, + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: admission.Allowed(""), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/webhook/resourcebinding/mutating_test.go b/pkg/webhook/resourcebinding/mutating_test.go new file mode 100644 index 000000000000..68a9356a56f2 --- /dev/null +++ b/pkg/webhook/resourcebinding/mutating_test.go @@ -0,0 +1,183 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourcebinding + +import ( + "context" + "encoding/json" + "errors" + "net/http" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the rb name and namespace to be used in the test. + name := "test-resource-binding" + namespace := "test-namespace" + podName := "test-pod" + + // Mock an admission request. 
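Before the request is mocked below, a note on the patch paths these mutation tests assert, such as /metadata/labels/resourcebinding.karmada.io~1permanent-id: the `~1` is JSON Pointer escaping (RFC 6901), which encodes a `/` inside a map key so it is not read as a path separator. A self-contained sketch of the escaping rule:

```go
package main

import (
	"fmt"
	"strings"
)

// rfc6901Escape builds the JSON Pointer token for a map key:
// "~" becomes "~0" and "/" becomes "~1" (RFC 6901, section 3).
func rfc6901Escape(key string) string {
	return strings.NewReplacer("~", "~0", "/", "~1").Replace(key)
}

func main() {
	key := "resourcebinding.karmada.io/permanent-id"
	fmt.Println("/metadata/labels/" + rfc6901Escape(key))
	// Output: /metadata/labels/resourcebinding.karmada.io~1permanent-id
}
```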
+ req := admission.Request{} + + // Create the initial rb object with default values for testing. + rb := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: namespace, + Name: podName, + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 1, + }, + }, + }, + } + + // Define the expected rb object after mutations. + wantRB := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{ + workv1alpha2.ResourceBindingPermanentIDLabel: "some-unique-uuid", + }, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: namespace, + Name: podName, + }, + Clusters: []workv1alpha2.TargetCluster{ + { + Name: "member1", + Replicas: 1, + }, + }, + }, + } + + // Mock decoder that decodes the request into the rb object. + decoder := &fakeMutationDecoder{ + obj: rb, + } + + // Marshal the expected rb object to simulate the final mutated object. + wantBytes, err := json.Marshal(wantRB) + if err != nil { + t.Fatalf("Failed to marshal expected resource binding: %v", err) + } + req.Object.Raw = wantBytes + + // Instantiate the mutating handler. + mutatingHandler := MutatingAdmission{ + Decoder: decoder, + } + + // Call the Handle function. + got := mutatingHandler.Handle(context.Background(), req) + + // Check if exactly one patch is applied. + if len(got.Patches) != 1 { + t.Errorf("Handle() returned an unexpected number of patches. Expected one patch, received: %v", got.Patches) + } + + // Verify that the only patch applied is for the UUID label. + // If any other patches are present, it indicates that the rb object was not handled as expected. + firstPatch := got.Patches[0] + if firstPatch.Operation != "replace" || firstPatch.Path != "/metadata/labels/resourcebinding.karmada.io~1permanent-id" { + t.Errorf("Handle() returned unexpected patches. Only the UUID patch was expected. Received patches: %v", got.Patches) + } + + // Check if the admission request was allowed. + if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/pkg/webhook/resourcedeletionprotection/validating_test.go b/pkg/webhook/resourcedeletionprotection/validating_test.go new file mode 100644 index 000000000000..fcdd27803fba --- /dev/null +++ b/pkg/webhook/resourcedeletionprotection/validating_test.go @@ -0,0 +1,167 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resourcedeletionprotection + +import ( + "context" + "errors" + "fmt" + "net/http" + "reflect" + "testing" + + admissionv1 "k8s.io/api/admission/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(rawObj runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if rawObj.Object != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(rawObj.Object).Elem()) + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeRawError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + }, + }, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + { + name: "Handle_DeleteWithProtectionLabel_DeniesAdmission", + decoder: &fakeValidationDecoder{}, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + workv1alpha2.DeletionProtectionLabelKey: workv1alpha2.DeletionProtectionAlways, + }, + }, + }, + }, + }, + }, + }, + want: admission.Denied(fmt.Sprintf("This resource is protected, please make sure to remove the label: %s", workv1alpha2.DeletionProtectionLabelKey)), + }, + { + name: "Handle_DeleteWithoutLabel_AllowsAdmission", + decoder: &fakeValidationDecoder{}, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{}, + }, + }, + }, + }, + }, + }, + want: admission.Allowed(""), + }, + { + name: "Handle_DeleteWithDifferentLabel_AllowsAdmission", + decoder: &fakeValidationDecoder{}, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Delete, + OldObject: runtime.RawExtension{ + Object: &unstructured.Unstructured{ + Object: map[string]interface{}{ + "metadata": map[string]interface{}{ + "labels": map[string]interface{}{ + "some-other-label": "some-value", + }, + }, + }, + }, + }, + }, + }, + want: admission.Allowed(""), + }, + { + name: "Handle_CreateOperation_AllowsAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Operation: admissionv1.Create, + }, + }, + want: admission.Allowed(""), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + 
v := &ValidatingAdmission{ + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/pkg/webhook/resourceinterpretercustomization/validation_test.go b/pkg/webhook/resourceinterpretercustomization/validation_test.go new file mode 100644 index 000000000000..be92d098508c --- /dev/null +++ b/pkg/webhook/resourceinterpretercustomization/validation_test.go @@ -0,0 +1,195 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resourceinterpretercustomization + +import ( + "context" + "errors" + "net/http" + "reflect" + "strings" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + configv1alpha1 "github.com/karmada-io/karmada/pkg/apis/config/v1alpha1" +) + +// ResponseType represents the type of admission response. +type ResponseType string + +const ( + Denied ResponseType = "Denied" + Allowed ResponseType = "Allowed" + Errored ResponseType = "Errored" +) + +// TestResponse is used to define expected response in a test case. +type TestResponse struct { + Type ResponseType + Message string +} + +type fakeValidationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeValidationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeValidationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// fakeClient is a mock implementation of the client.Client interface for testing. 
+type fakeClient struct { + client.Client + listError error +} + +func (f *fakeClient) List(_ context.Context, _ client.ObjectList, _ ...client.ListOption) error { + if f.listError != nil { + return f.listError + } + return nil +} + +func TestValidatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want TestResponse + listError error + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeValidationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: TestResponse{ + Type: Errored, + Message: "decode error", + }, + }, + { + name: "Handle_ListError_InternalError", + decoder: &fakeValidationDecoder{ + obj: &configv1alpha1.ResourceInterpreterCustomization{}, + }, + req: admission.Request{}, + listError: errors.New("list error"), + want: TestResponse{ + Type: Errored, + Message: "list error", + }, + }, + { + name: "Handle_WrongLuaCustomizationRetentionScript_DeniesAdmission", + decoder: &fakeValidationDecoder{ + obj: &configv1alpha1.ResourceInterpreterCustomization{ + Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{ + Customizations: configv1alpha1.CustomizationRules{ + Retention: &configv1alpha1.LocalValueRetention{LuaScript: `function Retain(desiredObj, observedObj)`}, + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Denied, + Message: "Lua script error: at EOF", + }, + }, + { + name: "Handle_ValidRequest_AllowsAdmission", + decoder: &fakeValidationDecoder{ + obj: &configv1alpha1.ResourceInterpreterCustomization{ + Spec: configv1alpha1.ResourceInterpreterCustomizationSpec{ + Target: configv1alpha1.CustomizationTarget{ + APIVersion: "foo/v1", + Kind: "bar", + }, + }, + }, + }, + req: admission.Request{}, + want: TestResponse{ + Type: Allowed, + Message: "", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v := &ValidatingAdmission{ + Client: &fakeClient{listError: tt.listError}, + Decoder: tt.decoder, + } + got := v.Handle(context.Background(), tt.req) + + // Extract type and message from the actual response. + gotType := extractResponseType(got) + gotMessage := extractErrorMessage(got) + + if gotType != tt.want.Type || !strings.Contains(gotMessage, tt.want.Message) { + t.Errorf("Handle() = {Type: %v, Message: %v}, want {Type: %v, Message: %v}", gotType, gotMessage, tt.want.Type, tt.want.Message) + } + }) + } +} + +// extractResponseType extracts the type of admission response. +func extractResponseType(resp admission.Response) ResponseType { + if resp.Allowed { + return Allowed + } + if resp.Result != nil { + if resp.Result.Code == http.StatusBadRequest || resp.Result.Code == http.StatusInternalServerError { + return Errored + } + } + return Denied +} + +// extractErrorMessage extracts the error message from a Denied/Errored response. +func extractErrorMessage(resp admission.Response) string { + if !resp.Allowed && resp.Result != nil { + return resp.Result.Message + } + return "" +} diff --git a/pkg/webhook/work/mutating_test.go b/pkg/webhook/work/mutating_test.go new file mode 100644 index 000000000000..5b3e7e8951b0 --- /dev/null +++ b/pkg/webhook/work/mutating_test.go @@ -0,0 +1,346 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package work + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "strings" + "testing" + "time" + + admissionv1 "k8s.io/api/admission/v1" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + + workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" +) + +type fakeMutationDecoder struct { + err error + obj runtime.Object +} + +// Decode mocks the Decode method of admission.Decoder. +func (f *fakeMutationDecoder) Decode(_ admission.Request, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +// DecodeRaw mocks the DecodeRaw method of admission.Decoder. +func (f *fakeMutationDecoder) DecodeRaw(_ runtime.RawExtension, obj runtime.Object) error { + if f.err != nil { + return f.err + } + if f.obj != nil { + reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(f.obj).Elem()) + } + return nil +} + +func TestMutatingAdmission_Handle(t *testing.T) { + tests := []struct { + name string + decoder admission.Decoder + req admission.Request + want admission.Response + }{ + { + name: "Handle_DecodeError_DeniesAdmission", + decoder: &fakeMutationDecoder{ + err: errors.New("decode error"), + }, + req: admission.Request{}, + want: admission.Errored(http.StatusBadRequest, errors.New("decode error")), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := MutatingAdmission{ + Decoder: tt.decoder, + } + got := m.Handle(context.Background(), tt.req) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Handle() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMutatingAdmission_Handle_FullCoverage(t *testing.T) { + // Define the work object name and namespace to be used in the test. 
+ name := "test-work" + namespace := "test-namespace" + + deployment := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: namespace, + CreationTimestamp: metav1.Time{Time: time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)}, + DeletionTimestamp: nil, + DeletionGracePeriodSeconds: func(i int64) *int64 { return &i }(30), + Generation: 2, + ManagedFields: []metav1.ManagedFieldsEntry{ + { + Manager: "kube-controller-manager", + Operation: "Apply", + APIVersion: "apps/v1", + }, + }, + ResourceVersion: "123456", + SelfLink: fmt.Sprintf("/apis/apps/v1/namespaces/%s/deployments/test-deployment", namespace), + UID: "abcd-1234-efgh-5678", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "owner-deployment", + UID: "owner-uid-1234", + }, + }, + Finalizers: []string{"foregroundDeletion"}, + Labels: map[string]string{}, + Annotations: nil, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx:stable-alpine-perl", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + Status: appsv1.DeploymentStatus{}, + } + + // Marshal the Deployment object to JSON + deploymentRaw, err := json.Marshal(deployment) + if err != nil { + fmt.Println("Error marshaling deployment:", err) + return + } + + // Mock a request with a specific namespace. + req := admission.Request{ + AdmissionRequest: admissionv1.AdmissionRequest{ + Namespace: namespace, + Operation: admissionv1.Create, + }, + } + + // Create the initial work object with default values for testing. + workObj := &workv1alpha1.Work{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Labels: map[string]string{}, + }, + Spec: workv1alpha1.WorkSpec{ + Workload: workv1alpha1.WorkloadTemplate{ + Manifests: []workv1alpha1.Manifest{ + { + RawExtension: runtime.RawExtension{ + Raw: deploymentRaw, + }, + }, + }, + }, + }, + } + + // Define the expected work object after mutations. + wantDeployment := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: namespace, + Annotations: map[string]string{ + workv1alpha2.ManagedAnnotation: strings.Join( + []string{workv1alpha2.ManagedAnnotation, workv1alpha2.ManagedLabels}, ",", + ), + workv1alpha2.ManagedLabels: workv1alpha2.WorkPermanentIDLabel, + }, + Labels: map[string]string{ + workv1alpha2.WorkPermanentIDLabel: "some-unique-id", + }, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(3), + Selector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "test", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "test", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "nginx", + Image: "nginx:stable-alpine-perl", + Ports: []corev1.ContainerPort{ + { + ContainerPort: 80, + }, + }, + }, + }, + }, + }, + }, + } + + // Marshal appsv1.Deployment object into JSON. 
+	wantDeploymentJSON, err := json.Marshal(wantDeployment)
+	if err != nil {
+		t.Fatalf("Failed to marshal expected deployment: %v", err)
+	}
+
+	// Convert the deployment object to unstructured to be able to delete some keys
+	// expected after mutations.
+	wantDeploymentUnstructured := &unstructured.Unstructured{}
+	wantDeploymentUnstructured.SetGroupVersionKind(schema.GroupVersionKind{
+		Group:   "apps",
+		Version: "v1",
+		Kind:    "Deployment",
+	})
+
+	// Unmarshal JSON into unstructured object.
+	if err = json.Unmarshal(wantDeploymentJSON, wantDeploymentUnstructured); err != nil {
+		t.Fatalf("Failed to unmarshal expected deployment into unstructured: %v", err)
+	}
+
+	// Remove the status and creationTimestamp fields to simulate what happens after mutations.
+	unstructured.RemoveNestedField(wantDeploymentUnstructured.Object, "metadata", "creationTimestamp")
+	unstructured.RemoveNestedField(wantDeploymentUnstructured.Object, "status")
+
+	// Marshal the unstructured object back to JSON.
+	wantDeploymentJSON, err = json.Marshal(wantDeploymentUnstructured)
+	if err != nil {
+		t.Fatalf("Failed to marshal modified deployment: %v", err)
+	}
+
+	// Define the expected work object after mutations.
+	wantWorkObj := &workv1alpha1.Work{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels: map[string]string{
+				workv1alpha2.WorkPermanentIDLabel: "some-unique-id",
+			},
+		},
+		Spec: workv1alpha1.WorkSpec{
+			Workload: workv1alpha1.WorkloadTemplate{
+				Manifests: []workv1alpha1.Manifest{
+					{
+						RawExtension: runtime.RawExtension{
+							Raw: wantDeploymentJSON,
+						},
+					},
+				},
+			},
+		},
+	}
+
+	// Mock decoder that decodes the request into the work object.
+	decoder := &fakeMutationDecoder{
+		obj: workObj,
+	}
+
+	// Marshal the expected work object to simulate the final mutated object.
+	wantBytes, err := json.Marshal(wantWorkObj)
+	if err != nil {
+		t.Fatalf("Failed to marshal expected work object: %v", err)
+	}
+	req.Object.Raw = wantBytes
+
+	// Instantiate the mutating handler.
+	mutatingHandler := MutatingAdmission{
+		Decoder: decoder,
+	}
+
+	// Call the Handle function.
+	got := mutatingHandler.Handle(context.Background(), req)
+
+	// Check if exactly two patches are applied.
+	if len(got.Patches) != 2 {
+		t.Errorf("Handle() returned an unexpected number of patches. Expected 2 patches, received: %v", got.Patches)
+	}
+
+	// Verify that the patches are for the UUID label.
+	// If any other patches are present, it indicates that the work object was not handled as expected.
+	for _, patch := range got.Patches {
+		if patch.Operation != "replace" ||
+			(patch.Path != "/metadata/labels/work.karmada.io~1permanent-id" &&
+				patch.Path != "/spec/workload/manifests/0/metadata/labels/work.karmada.io~1permanent-id") {
+			t.Errorf("Handle() returned unexpected patches. Only the two UUID patches were expected. Received patches: %v", got.Patches)
+		}
+	}
+
+	// Check if the admission request was allowed.
+ if !got.Allowed { + t.Errorf("Handle() got.Allowed = false, want true") + } +} diff --git a/test/e2e/clusteroverridepolicy_test.go b/test/e2e/clusteroverridepolicy_test.go index a5cada094230..db939e782783 100644 --- a/test/e2e/clusteroverridepolicy_test.go +++ b/test/e2e/clusteroverridepolicy_test.go @@ -154,7 +154,7 @@ var _ = framework.SerialDescribe("The ClusterOverridePolicy with nil resourceSel Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, diff --git a/test/e2e/clusterpropagationpolicy_test.go b/test/e2e/clusterpropagationpolicy_test.go index c019c9480d4d..3aa0266b9c1f 100644 --- a/test/e2e/clusterpropagationpolicy_test.go +++ b/test/e2e/clusterpropagationpolicy_test.go @@ -43,13 +43,22 @@ import ( testhelper "github.com/karmada-io/karmada/test/helper" ) -var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() { +var _ = ginkgo.Describe("[BasicCase] ClusterPropagationPolicy testing", func() { + var policyName string + var policy *policyv1alpha1.ClusterPropagationPolicy + + ginkgo.JustBeforeEach(func() { + framework.CreateClusterPropagationPolicy(karmadaClient, policy) + ginkgo.DeferCleanup(func() { + framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) + }) + }) + ginkgo.Context("CustomResourceDefinition propagation testing", func() { var crdGroup string var randStr string var crdSpecNames apiextensionsv1.CustomResourceDefinitionNames var crd *apiextensionsv1.CustomResourceDefinition - var crdPolicy *policyv1alpha1.ClusterPropagationPolicy ginkgo.BeforeEach(func() { crdGroup = fmt.Sprintf("example-%s.karmada.io", rand.String(RandomStrLength)) @@ -61,7 +70,8 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() Singular: fmt.Sprintf("foo%s", randStr), } crd = testhelper.NewCustomResourceDefinition(crdGroup, crdSpecNames, apiextensionsv1.NamespaceScoped) - crdPolicy = testhelper.NewClusterPropagationPolicy(crd.Name, []policyv1alpha1.ResourceSelector{ + policyName = crd.Name + policy = testhelper.NewClusterPropagationPolicy(policyName, []policyv1alpha1.ResourceSelector{ { APIVersion: crd.APIVersion, Kind: crd.Kind, @@ -75,10 +85,8 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) ginkgo.BeforeEach(func() { - framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy) framework.CreateCRD(dynamicClient, crd) ginkgo.DeferCleanup(func() { - framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name) framework.RemoveCRD(dynamicClient, crd.Name) framework.WaitCRDDisappearedOnClusters(framework.ClusterNames(), crd.Name) }) @@ -94,8 +102,6 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() ginkgo.Context("ClusterRole propagation testing", func() { var ( clusterRoleName string - policyName string - policy *policyv1alpha1.ClusterPropagationPolicy clusterRole *rbacv1.ClusterRole ) @@ -118,10 +124,8 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) ginkgo.BeforeEach(func() { - framework.CreateClusterPropagationPolicy(karmadaClient, policy) framework.CreateClusterRole(kubeClient, clusterRole) ginkgo.DeferCleanup(func() { - framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) framework.RemoveClusterRole(kubeClient, clusterRole.Name) framework.WaitClusterRoleDisappearOnClusters(framework.ClusterNames(), clusterRole.Name) }) @@ -138,8 +142,6 @@ var _ = 
ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() ginkgo.Context("ClusterRoleBinding propagation testing", func() { var ( clusterRoleBindingName string - policyName string - policy *policyv1alpha1.ClusterPropagationPolicy clusterRoleBinding *rbacv1.ClusterRoleBinding ) @@ -162,10 +164,8 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) ginkgo.BeforeEach(func() { - framework.CreateClusterPropagationPolicy(karmadaClient, policy) framework.CreateClusterRoleBinding(kubeClient, clusterRoleBinding) ginkgo.DeferCleanup(func() { - framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) framework.RemoveClusterRoleBinding(kubeClient, clusterRoleBinding.Name) framework.WaitClusterRoleBindingDisappearOnClusters(framework.ClusterNames(), clusterRoleBinding.Name) }) @@ -180,13 +180,12 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) ginkgo.Context("Deployment propagation testing", func() { - var policy *policyv1alpha1.ClusterPropagationPolicy var deployment *appsv1.Deployment var targetMember string ginkgo.BeforeEach(func() { targetMember = framework.ClusterNames()[0] - policyName := cppNamePrefix + rand.String(RandomStrLength) + policyName = cppNamePrefix + rand.String(RandomStrLength) deploymentName := deploymentNamePrefix + rand.String(RandomStrLength) deployment = testhelper.NewDeployment(testNamespace, deploymentName) @@ -204,10 +203,8 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) ginkgo.BeforeEach(func() { - framework.CreateClusterPropagationPolicy(karmadaClient, policy) framework.CreateDeployment(kubeClient, deployment) ginkgo.DeferCleanup(func() { - framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) }) }) @@ -227,7 +224,62 @@ var _ = ginkgo.Describe("[BasicClusterPropagation] propagation testing", func() }) }) -var _ = ginkgo.Describe("[AdvancedClusterPropagation] propagation testing", func() { +var _ = ginkgo.Describe("[CornerCase] ClusterPropagationPolicy testing", func() { + var policyName string + var policy *policyv1alpha1.ClusterPropagationPolicy + + ginkgo.JustBeforeEach(func() { + framework.CreateClusterPropagationPolicy(karmadaClient, policy) + ginkgo.DeferCleanup(func() { + framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) + }) + }) + + ginkgo.Context("Deployment propagation testing", func() { + var deployment *appsv1.Deployment + var targetMember string + + ginkgo.BeforeEach(func() { + targetMember = framework.ClusterNames()[0] + policyName = cppNamePrefix + rand.String(RandomStrLength) + deploymentName := deploymentNamePrefix + rand.String(RandomStrLength) + + deployment = testhelper.NewDeployment(testNamespace, deploymentName) + policy = testhelper.NewClusterPropagationPolicy(policyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: deployment.APIVersion, + Kind: deployment.Kind, + Name: deployment.Name, + }}, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: []string{targetMember}, + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreateDeployment(kubeClient, deployment) + ginkgo.DeferCleanup(func() { + framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + }) + }) + + ginkgo.It("deployment propagation testing", func() { + framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name, + func(d 
*appsv1.Deployment) bool { + return *d.Spec.Replicas == *deployment.Spec.Replicas + }) + + framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas) + framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name, + func(deployment *appsv1.Deployment) bool { + return *deployment.Spec.Replicas == updateDeploymentReplicas + }) + }) + }) +}) + +var _ = ginkgo.Describe("[AdvancedCase] ClusterPropagationPolicy testing", func() { ginkgo.Context("Edit ClusterPropagationPolicy ResourceSelectors", func() { ginkgo.When("propagate namespace scope resource", func() { var policy *policyv1alpha1.ClusterPropagationPolicy @@ -552,7 +604,7 @@ var _ = ginkgo.Describe("[AdvancedClusterPropagation] propagation testing", func // ImplicitPriority more than one PP matches the object, we should choose the most suitable one. // Set it to run sequentially to avoid affecting other test cases. -var _ = framework.SerialDescribe("[ImplicitPriority] propagation testing", func() { +var _ = framework.SerialDescribe("[ImplicitPriority] ClusterPropagationPolicy testing", func() { ginkgo.Context("priorityMatchName/priorityMatchLabel/priorityMatchAll propagation testing", func() { var priorityMatchName, priorityMatchLabelSelector, priorityMatchAll string var deploymentNamespace, deploymentName string @@ -652,7 +704,7 @@ var _ = framework.SerialDescribe("[ImplicitPriority] propagation testing", func( // ExplicitPriority more than one CPP matches the object, we should select the one with the highest explicit priority, if the // explicit priority is same, select the one with the highest implicit priority. -var _ = ginkgo.Describe("[ExplicitPriority] propagation testing", func() { +var _ = ginkgo.Describe("[ExplicitPriority] ClusterPropagationPolicy testing", func() { ginkgo.Context("high explicit/low priority/implicit priority ClusterPropagationPolicy propagation testing", func() { var higherPriorityLabelSelector, lowerPriorityMatchName, implicitPriorityMatchName string var deploymentNamespace, deploymentName string @@ -809,7 +861,7 @@ var _ = ginkgo.Describe("[ExplicitPriority] propagation testing", func() { // Delete when delete a clusterPropagationPolicy, and no more clusterPropagationPolicy matches the object, something like // labels should be cleaned. 
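The restructuring in this file leans on ginkgo's setup ordering: every BeforeEach in scope runs before any JustBeforeEach, so the outer JustBeforeEach can create whatever `policy` the innermost Context assembled. A distilled sketch of the pattern, reusing identifiers from this suite (illustrative only, not an added test):

```go
var _ = ginkgo.Describe("[OrderingSketch] ClusterPropagationPolicy testing", func() {
	var policy *policyv1alpha1.ClusterPropagationPolicy

	// Runs after all BeforeEach blocks in scope, so `policy` is already set.
	ginkgo.JustBeforeEach(func() {
		framework.CreateClusterPropagationPolicy(karmadaClient, policy)
		ginkgo.DeferCleanup(func() {
			framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name)
		})
	})

	ginkgo.Context("Deployment propagation", func() {
		var deployment *appsv1.Deployment

		ginkgo.BeforeEach(func() {
			deployment = testhelper.NewDeployment(testNamespace, deploymentNamePrefix+rand.String(RandomStrLength))
			policy = testhelper.NewClusterPropagationPolicy(cppNamePrefix+rand.String(RandomStrLength),
				[]policyv1alpha1.ResourceSelector{{
					APIVersion: deployment.APIVersion,
					Kind:       deployment.Kind,
					Name:       deployment.Name,
				}},
				policyv1alpha1.Placement{
					ClusterAffinity: &policyv1alpha1.ClusterAffinity{
						ClusterNames: framework.ClusterNames(),
					},
				})
		})

		ginkgo.It("sees a fully-assembled policy", func() {
			gomega.Expect(policy.Name).NotTo(gomega.BeEmpty())
		})
	})
})
```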
-var _ = ginkgo.Describe("[Delete] clusterPropagation testing", func() { +var _ = ginkgo.Describe("[DeleteCase] ClusterPropagationPolicy testing", func() { ginkgo.Context("delete clusterPropagation and remove the labels and annotations from the resource template and reference binding", func() { var policy *policyv1alpha1.ClusterPropagationPolicy var deployment *appsv1.Deployment @@ -1023,8 +1075,7 @@ var _ = ginkgo.Describe("[Delete] clusterPropagation testing", func() { }) }) -// Suspend dispatching of ClusterPropagationPolicy -var _ = ginkgo.Describe("[Suspend] clusterPropagation testing", func() { +var _ = ginkgo.Describe("[Suspension] ClusterPropagationPolicy testing", func() { var policy *policyv1alpha1.ClusterPropagationPolicy var clusterRole *rbacv1.ClusterRole var targetMember string @@ -1054,27 +1105,30 @@ var _ = ginkgo.Describe("[Suspend] clusterPropagation testing", func() { ginkgo.BeforeEach(func() { framework.CreateClusterPropagationPolicy(karmadaClient, policy) framework.CreateClusterRole(kubeClient, clusterRole) + framework.WaitClusterRolePresentOnClusterFitWith(targetMember, clusterRole.Name, func(*rbacv1.ClusterRole) bool { + return true + }) ginkgo.DeferCleanup(func() { framework.RemoveClusterPropagationPolicy(karmadaClient, policy.Name) framework.RemoveClusterRole(kubeClient, clusterRole.Name) }) }) - ginkgo.BeforeEach(func() { - policy.Spec.Suspension = &policyv1alpha1.Suspension{ - Dispatching: ptr.To(true), - } - framework.UpdateClusterPropagationPolicyWithSpec(karmadaClient, policy.Name, policy.Spec) - }) + ginkgo.It("suspend the CPP dispatching", func() { + ginkgo.By("update the cpp suspension dispatching to true", func() { + policy.Spec.Suspension = &policyv1alpha1.Suspension{ + Dispatching: ptr.To(true), + } + framework.UpdateClusterPropagationPolicyWithSpec(karmadaClient, policy.Name, policy.Spec) + }) - ginkgo.Context("suspend the ClusterPropagationPolicy dispatching", func() { - ginkgo.It("suspends ClusterResourceBinding", func() { + ginkgo.By("check CRB suspension spec", func() { framework.WaitClusterResourceBindingFitWith(karmadaClient, resourceBindingName, func(binding *workv1alpha2.ClusterResourceBinding) bool { return binding.Spec.Suspension != nil && ptr.Deref(binding.Spec.Suspension.Dispatching, false) }) }) - ginkgo.It("suspends Work", func() { + ginkgo.By("check Work suspension spec", func() { esName := names.GenerateExecutionSpaceName(targetMember) gomega.Eventually(func() bool { work, err := karmadaClient.WorkV1alpha1().Works(esName).Get(context.TODO(), workName, metav1.GetOptions{}) @@ -1085,7 +1139,7 @@ var _ = ginkgo.Describe("[Suspend] clusterPropagation testing", func() { }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) - ginkgo.It("adds suspend dispatching condition to Work", func() { + ginkgo.By("check Work Dispatching status condition", func() { esName := names.GenerateExecutionSpaceName(targetMember) gomega.Eventually(func() bool { work, err := karmadaClient.WorkV1alpha1().Works(esName).Get(context.TODO(), workName, metav1.GetOptions{}) diff --git a/test/e2e/coverage_docs/overridepolicy_test.md b/test/e2e/coverage_docs/overridepolicy_test.md index 042b93564a97..9de4380e37c0 100644 --- a/test/e2e/coverage_docs/overridepolicy_test.md +++ b/test/e2e/coverage_docs/overridepolicy_test.md @@ -8,6 +8,8 @@ | Check if the OverridePolicy will update the deployment's image value | deployment imageOverride testing | | | Check if the OverridePolicy will update the pod's image value | pod imageOverride testing | | | Check if the OverridePolicy 
will update the specific image value | deployment imageOverride testing | | +| Check if the OverridePolicy will update the value inside JSON | deployment fieldOverride testing | | +| Check if the OverridePolicy will update the value inside YAML | deployment fieldOverride testing | | #### OverridePolicy with nil resourceSelector testing | Test Case | E2E Describe Text | Comments | diff --git a/test/e2e/failover_test.go b/test/e2e/failover_test.go index 2900a03d61fa..b8114a30ac8e 100644 --- a/test/e2e/failover_test.go +++ b/test/e2e/failover_test.go @@ -354,7 +354,7 @@ var _ = framework.SerialDescribe("failover testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fake", }, }, @@ -405,7 +405,7 @@ var _ = framework.SerialDescribe("failover testing", func() { // modify gracePeriodSeconds to create a time difference with tolerationSecond to avoid cluster interference patch := []map[string]interface{}{ { - "op": "replace", + "op": policyv1alpha1.OverriderOpReplace, "path": "/spec/failover/application/gracePeriodSeconds", "value": ptr.To[int32](gracePeriodSeconds), }, @@ -432,7 +432,7 @@ var _ = framework.SerialDescribe("failover testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fake", }, }, @@ -545,7 +545,7 @@ var _ = framework.SerialDescribe("failover testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fake", }, }, diff --git a/test/e2e/framework/resourcebinding.go b/test/e2e/framework/resourcebinding.go index a48d5f2522e9..eb12b70ccd6f 100644 --- a/test/e2e/framework/resourcebinding.go +++ b/test/e2e/framework/resourcebinding.go @@ -50,17 +50,17 @@ func AssertBindingScheduledClusters(client karmada.Interface, namespace, name st if err != nil { return err } - scheduledClutsers := make([]string, 0, len(binding.Spec.Clusters)) + scheduledClusters := make([]string, 0, len(binding.Spec.Clusters)) for _, scheduledCluster := range binding.Spec.Clusters { - scheduledClutsers = append(scheduledClutsers, scheduledCluster.Name) + scheduledClusters = append(scheduledClusters, scheduledCluster.Name) } - sort.Strings(scheduledClutsers) - for _, expectedClutsers := range expectedResults { - if reflect.DeepEqual(scheduledClutsers, expectedClutsers) { + sort.Strings(scheduledClusters) + for _, expectedClusters := range expectedResults { + if reflect.DeepEqual(scheduledClusters, expectedClusters) { return nil } } - return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClutsers, expectedResults) + return fmt.Errorf("scheduled clusters: %+v, expected possible results: %+v", scheduledClusters, expectedResults) }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred()) }) } diff --git a/test/e2e/framework/work.go b/test/e2e/framework/work.go new file mode 100644 index 000000000000..d2f31a03f491 --- /dev/null +++ b/test/e2e/framework/work.go @@ -0,0 +1,44 @@ +/* +Copyright 2024 The Karmada Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "context" + "fmt" + + "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + karmada "github.com/karmada-io/karmada/pkg/generated/clientset/versioned" +) + +// WaitForWorkToDisappear waits for the work to disappear until timeout; a NotFound error counts as success, any other error fails the poll +func WaitForWorkToDisappear(client karmada.Interface, namespace, name string) { + klog.Infof("Waiting for work(%s/%s) to disappear", namespace, name) + gomega.Eventually(func() error { + _, err := client.WorkV1alpha1().Works(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return fmt.Errorf("work(%s/%s) still exists", namespace, name) + } + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to get work(%s/%s), err: %w", namespace, name, err) + } + return nil + }, pollTimeout, pollInterval).ShouldNot(gomega.HaveOccurred()) +} diff --git a/test/e2e/framework/workloadrebalancer.go b/test/e2e/framework/workloadrebalancer.go index d5f95c75b652..074d7caa1ef9 100644 --- a/test/e2e/framework/workloadrebalancer.go +++ b/test/e2e/framework/workloadrebalancer.go @@ -47,7 +47,7 @@ func RemoveWorkloadRebalancer(client karmada.Interface, name string) { }) } -// UpdateWorkloadRebalancer udpate WorkloadRebalancer with karmada client. +// UpdateWorkloadRebalancer updates WorkloadRebalancer with karmada client. // if workloads/ttl is a nil pointer, keep previous value unchanged.
func UpdateWorkloadRebalancer(client karmada.Interface, name string, workloads *[]appsv1alpha1.ObjectReference, ttl *int32) { ginkgo.By(fmt.Sprintf("Updating WorkloadRebalancer(%s)'s workloads", name), func() { diff --git a/test/e2e/karmadactl_test.go b/test/e2e/karmadactl_test.go index a340d8e86660..d201ee134308 100644 --- a/test/e2e/karmadactl_test.go +++ b/test/e2e/karmadactl_test.go @@ -546,7 +546,8 @@ var _ = ginkgo.Describe("Karmadactl top testing", func() { for _, clusterName := range framework.ClusterNames() { cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, "", karmadactlTimeout, "top", "pod", podName, "-n", testNamespace, "-C", clusterName) _, err := cmd.ExecOrDie() - gomega.Expect(strings.Contains(err.Error(), fmt.Sprintf("pods \"%s\" not found", podName))).To(gomega.BeTrue(), "should not found") + gomega.Expect(err).Should(gomega.HaveOccurred()) + gomega.Expect(strings.Contains(err.Error(), fmt.Sprintf("pods \"%s\" not found", podName))).To(gomega.BeTrue(), "should not found", fmt.Sprintf("errMsg: %s", err.Error())) } }) }) @@ -589,28 +590,30 @@ var _ = ginkgo.Describe("Karmadactl top testing", func() { }) ginkgo.It("Karmadactl top existing pod", func() { - for _, clusterName := range framework.ClusterNames() { - cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", pod.Name, "-n", pod.Namespace, "-C", clusterName) + ginkgo.By("Karmadactl top existing pod", func() { + for _, clusterName := range framework.ClusterNames() { + cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", pod.Name, "-n", pod.Namespace, "-C", clusterName) + _, err := cmd.ExecOrDie() + gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + } + }) + + ginkgo.By("Karmadactl top existing pod without setting cluster flag", func() { + cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", pod.Name, "-n", pod.Namespace) _, err := cmd.ExecOrDie() gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - } - }) - - ginkgo.It("Karmadactl top existing pod without setting cluster flag", func() { - cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", pod.Name, "-n", pod.Namespace) - _, err := cmd.ExecOrDie() - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - }) + }) - ginkgo.It("Karmadactl top pod without specific podName", func() { - cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", "-A") - _, err := cmd.ExecOrDie() - gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - for _, clusterName := range framework.ClusterNames() { - cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", "-A", "-C", clusterName) + ginkgo.By("Karmadactl top pod without specific podName", func() { + cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", "-A") _, err := cmd.ExecOrDie() gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) - } + for _, clusterName := range framework.ClusterNames() { + cmd := framework.NewKarmadactlCommand(kubeconfig, karmadaContext, karmadactlPath, pod.Namespace, karmadactlTimeout, "top", "pod", "-A", "-C", clusterName) + _, err := cmd.ExecOrDie() + 
gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) + } + }) }) }) }) diff --git a/test/e2e/seamless_migration_test.go b/test/e2e/migration_and_rollback_test.go similarity index 57% rename from test/e2e/seamless_migration_test.go rename to test/e2e/migration_and_rollback_test.go index 180627cc660c..eaff50ea59ad 100644 --- a/test/e2e/seamless_migration_test.go +++ b/test/e2e/migration_and_rollback_test.go @@ -30,15 +30,17 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" + "k8s.io/utils/ptr" policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + pkgutil "github.com/karmada-io/karmada/pkg/util" "github.com/karmada-io/karmada/pkg/util/names" "github.com/karmada-io/karmada/test/e2e/framework" "github.com/karmada-io/karmada/test/helper" ) -var _ = ginkgo.Describe("Seamless migration testing", func() { +var _ = ginkgo.Describe("Seamless migration and rollback testing", func() { var member1 string var member1Client kubernetes.Interface @@ -51,7 +53,7 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { ginkgo.Context("Test migrate namespaced resource: Deployment", func() { var deployment *appsv1.Deployment var propagationPolicy *policyv1alpha1.PropagationPolicy - var bindingName string + var bindingName, workName, workNamespace string ginkgo.BeforeEach(func() { deployment = helper.NewDeployment(testNamespace, deploymentNamePrefix+rand.String(RandomStrLength)) @@ -65,6 +67,8 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{member1}}, }) bindingName = names.GenerateBindingName(deployment.Kind, deployment.Name) + workName = names.GenerateWorkName(deployment.Kind, deployment.Name, deployment.Namespace) + workNamespace = names.GenerateExecutionSpaceName(member1) }) ginkgo.BeforeEach(func() { @@ -76,14 +80,10 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) ginkgo.DeferCleanup(func() { - // Delete Deployment in karmada control plane - framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + // Delete Deployment in member cluster + framework.RemoveDeployment(member1Client, deployment.Namespace, deployment.Name) // Delete PropagationPolicy in karmada control plane framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) - - // Verify Deployment in member cluster will be deleted automatically after promotion since it has been deleted from Karmada - klog.Infof("Waiting for Deployment deleted from cluster(%s)", member1) - framework.WaitDeploymentDisappearOnCluster(member1, testNamespace, deployment.Name) }) }) @@ -103,9 +103,10 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) - // Step 2, Update PropagationPolicy in karmada control plane with conflictResolution=Overwrite + // Step 2, Update PropagationPolicy in karmada control plane with conflictResolution=Overwrite and preserveResourcesOnDeletion=true ginkgo.By(fmt.Sprintf("Update PropagationPolicy %s in karmada control plane with conflictResolution=Overwrite", propagationPolicy.Name), func() { propagationPolicy.Spec.ConflictResolution = policyv1alpha1.ConflictOverwrite + propagationPolicy.Spec.PreserveResourcesOnDeletion = ptr.To[bool](true) framework.UpdatePropagationPolicyWithSpec(karmadaClient, 
propagationPolicy.Namespace, propagationPolicy.Name, propagationPolicy.Spec) }) @@ -122,6 +123,19 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { gomega.Expect(items[0].Applied).Should(gomega.BeTrue()) gomega.Expect(items[0].Health).Should(gomega.Equal(workv1alpha2.ResourceHealthy)) }) + + // Step 4, Delete resource template and check whether member cluster resource is preserved + ginkgo.By("Delete resource template and check whether member cluster resource is preserved", func() { + // Delete Deployment in karmada control plane + framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + + // Wait for work deleted + framework.WaitForWorkToDisappear(karmadaClient, workNamespace, workName) + + // Check member cluster resource is preserved + framework.WaitDeploymentPresentOnClusterFitWith(member1, deployment.Namespace, deployment.Name, isResourceNotManagedByKarmada) + + }) }) }) @@ -129,7 +143,7 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { var clusterRoleName string var clusterRole *rbacv1.ClusterRole var cpp *policyv1alpha1.ClusterPropagationPolicy - var bindingName string + var bindingName, workName, workNamespace string ginkgo.BeforeEach(func() { clusterRoleName = clusterRoleNamePrefix + rand.String(RandomStrLength) @@ -151,7 +165,10 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{member1}}, }) cpp.Spec.ConflictResolution = policyv1alpha1.ConflictOverwrite + cpp.Spec.PreserveResourcesOnDeletion = ptr.To[bool](true) bindingName = names.GenerateBindingName(clusterRole.Kind, clusterRole.Name) + workName = names.GenerateWorkName(clusterRole.Kind, clusterRole.Name, clusterRole.Namespace) + workNamespace = names.GenerateExecutionSpaceName(member1) }) ginkgo.BeforeEach(func() { @@ -163,14 +180,10 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { framework.CreateClusterPropagationPolicy(karmadaClient, cpp) ginkgo.DeferCleanup(func() { - // Delete ClusterRole in karmada control plane - framework.RemoveClusterRole(kubeClient, clusterRoleName) + // Delete ClusterRole in member cluster + framework.RemoveClusterRole(member1Client, clusterRoleName) // Delete ClusterPropagationPolicy in karmada control plane framework.RemoveClusterPropagationPolicy(karmadaClient, cpp.Name) - - // Verify ClusterRole in member cluster will be deleted automatically after promotion since it has been deleted from Karmada - klog.Infof("Waiting for ClusterRole deleted from cluster(%s)", member1) - framework.WaitClusterRoleDisappearOnCluster(member1, clusterRoleName) }) }) @@ -187,6 +200,18 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { return e1 == nil && e2 == nil && len(binding.Status.AggregatedStatus) > 0 && binding.Status.AggregatedStatus[0].Applied }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) + + ginkgo.By("Delete resource template and check whether member cluster resource is preserved", func() { + // Delete ClusterRole in karmada control plane + framework.RemoveClusterRole(kubeClient, clusterRole.Name) + + // Wait for work deleted + framework.WaitForWorkToDisappear(karmadaClient, workNamespace, workName) + + // Check member cluster resource is preserved + framework.WaitClusterRolePresentOnClusterFitWith(member1, clusterRole.Name, isResourceNotManagedByKarmada) + + }) }) }) @@ -194,7 +219,7 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { var serviceName string var service *corev1.Service var pp 
*policyv1alpha1.PropagationPolicy - var bindingName string + var bindingName, workName, workNamespace string ginkgo.BeforeEach(func() { serviceName = serviceNamePrefix + rand.String(RandomStrLength) @@ -209,7 +234,10 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{member1}}, }) pp.Spec.ConflictResolution = policyv1alpha1.ConflictOverwrite + pp.Spec.PreserveResourcesOnDeletion = ptr.To[bool](true) bindingName = names.GenerateBindingName(service.Kind, service.Name) + workName = names.GenerateWorkName(service.Kind, service.Name, service.Namespace) + workNamespace = names.GenerateExecutionSpaceName(member1) }) ginkgo.BeforeEach(func() { @@ -221,19 +249,15 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { framework.CreatePropagationPolicy(karmadaClient, pp) ginkgo.DeferCleanup(func() { - // Delete Service in karmada control plane - framework.RemoveService(kubeClient, testNamespace, serviceName) + // Delete Service in member cluster + framework.RemoveService(member1Client, testNamespace, serviceName) // Delete PropagationPolicy in karmada control plane framework.RemovePropagationPolicy(karmadaClient, testNamespace, pp.Name) - - // Verify Service in member cluster will be deleted automatically after promotion since it has been deleted from Karmada - klog.Infof("Waiting for Service deleted from cluster(%s)", member1) - framework.WaitServiceDisappearOnCluster(member1, testNamespace, serviceName) }) }) ginkgo.It("Verify migrate a Service from member cluster", func() { - ginkgo.By(fmt.Sprintf("Verify PropagationPolicy %s got Applied by overwriting conflict resource", bindingName), func() { + ginkgo.By(fmt.Sprintf("Verify ResourceBinding %s got Applied by overwriting conflict resource", bindingName), func() { klog.Infof("Waiting to verify ResourceBinding %s got Applied by overwriting conflict resource", bindingName) gomega.Eventually(func() bool { framework.WaitServicePresentOnClusterFitWith(member1, testNamespace, serviceName, func(*corev1.Service) bool { @@ -245,6 +269,121 @@ var _ = ginkgo.Describe("Seamless migration testing", func() { return e1 == nil && e2 == nil && len(binding.Status.AggregatedStatus) > 0 && binding.Status.AggregatedStatus[0].Applied }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) + + ginkgo.By("Delete resource template and check whether member cluster resource is preserved", func() { + // Delete Service in karmada control plane + framework.RemoveService(kubeClient, service.Namespace, service.Name) + + // Wait for work deleted + framework.WaitForWorkToDisappear(karmadaClient, workNamespace, workName) + + // Check member cluster resource is preserved + framework.WaitServicePresentOnClusterFitWith(member1, service.Namespace, service.Name, isResourceNotManagedByKarmada) + }) + }) + }) + + ginkgo.Context("Test migrate dependent resource", func() { + var secret *corev1.Secret + var volume []corev1.Volume + var deployment *appsv1.Deployment + var propagationPolicy *policyv1alpha1.PropagationPolicy + var bindingName, workName, workNamespace string + + ginkgo.BeforeEach(func() { + secret = helper.NewSecret(testNamespace, secretNamePrefix+rand.String(RandomStrLength), map[string][]byte{"test": []byte("test")}) + volume = []corev1.Volume{{ + Name: secret.Name, + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: secret.Name, + }, + }, + }} + deployment = helper.NewDeploymentWithVolumes(testNamespace, 
deploymentNamePrefix+rand.String(RandomStrLength), volume) + propagationPolicy = helper.NewPropagationPolicy(deployment.Namespace, deployment.Name, []policyv1alpha1.ResourceSelector{ + { + APIVersion: deployment.APIVersion, + Kind: deployment.Kind, + Name: deployment.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{member1}}, + }) + propagationPolicy.Spec.PropagateDeps = true + propagationPolicy.Spec.ConflictResolution = policyv1alpha1.ConflictOverwrite + propagationPolicy.Spec.PreserveResourcesOnDeletion = ptr.To[bool](true) + bindingName = names.GenerateBindingName(secret.Kind, secret.Name) + workName = names.GenerateWorkName(secret.Kind, secret.Name, secret.Namespace) + workNamespace = names.GenerateExecutionSpaceName(member1) + }) + + ginkgo.BeforeEach(func() { + // Create Deployment in member1 cluster + framework.CreateDeployment(member1Client, deployment) + // Create Secret in member1 cluster + framework.CreateSecret(member1Client, secret) + // Create Deployment in karmada control plane + framework.CreateDeployment(kubeClient, deployment) + // Create Secret in karmada control plane + framework.CreateSecret(kubeClient, secret) + // Create PropagationPolicy in karmada control plane without conflictResolution field + framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) + + ginkgo.DeferCleanup(func() { + // Delete Deployment in control plane and member cluster + framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + framework.RemoveDeployment(member1Client, deployment.Namespace, deployment.Name) + // Delete Secret in member cluster + framework.RemoveSecret(member1Client, secret.Namespace, secret.Name) + // Delete PropagationPolicy in karmada control plane + framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) + }) + }) + + ginkgo.It("Verify migrate a dependent secret from member cluster", func() { + + ginkgo.By(fmt.Sprintf("Verify ResourceBinding %s got Applied by overwriting conflict resource", bindingName), func() { + klog.Infof("Waiting to verify ResourceBinding %s got Applied by overwriting conflict resource", bindingName) + gomega.Eventually(func() bool { + framework.WaitSecretPresentOnClusterFitWith(member1, testNamespace, secret.Name, func(*corev1.Secret) bool { + return true + }) + _, e1 := kubeClient.CoreV1().Secrets(testNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}) + binding, e2 := karmadaClient.WorkV1alpha2().ResourceBindings(testNamespace).Get(context.TODO(), bindingName, metav1.GetOptions{}) + + return e1 == nil && e2 == nil && len(binding.Status.AggregatedStatus) > 0 && binding.Status.AggregatedStatus[0].Applied + }, pollTimeout, pollInterval).Should(gomega.Equal(true)) + }) + + ginkgo.By("Delete dependent secret template and check whether member cluster secret is preserved", func() { + // Delete dependent secret in karmada control plane + framework.RemoveSecret(kubeClient, secret.Namespace, secret.Name) + + // Wait for work deleted + framework.WaitForWorkToDisappear(karmadaClient, workNamespace, workName) + + // Check member cluster secret is preserved + framework.WaitSecretPresentOnClusterFitWith(member1, secret.Namespace, secret.Name, isResourceNotManagedByKarmada) + }) }) }) }) + +// isResourceNotManagedByKarmada checks if resource is missing all karmada managed labels/annotations +// which indicates that it's not managed by Karmada. 
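Taken together, the migration cases above follow one recipe: take over a conflicting resource with `ConflictOverwrite`, then roll back by deleting the template while `PreserveResourcesOnDeletion` keeps the member-cluster copy. A minimal, hedged sketch of the policy shape every case sets up, reusing the same helpers and the `member1` variable from these tests (the target resource name is illustrative):

```go
// Sketch only, not part of this patch: the policy settings the
// migration-and-rollback cases rely on.
pp := helper.NewPropagationPolicy(testNamespace, "migrate-demo", []policyv1alpha1.ResourceSelector{
	{APIVersion: "apps/v1", Kind: "Deployment", Name: "nginx-deploy"}, // illustrative target
}, policyv1alpha1.Placement{
	ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: []string{member1}},
})
pp.Spec.ConflictResolution = policyv1alpha1.ConflictOverwrite // take over the pre-existing resource
pp.Spec.PreserveResourcesOnDeletion = ptr.To[bool](true)      // keep the member-cluster copy on rollback
pp.Spec.PropagateDeps = true                                  // only needed when dependencies (e.g. Secrets) migrate too
```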
+func isResourceNotManagedByKarmada[T metav1.Object](obj T) bool { + for _, key := range pkgutil.ManagedResourceLabels { + if _, exist := obj.GetLabels()[key]; exist { + return false + } + } + + for _, key := range pkgutil.ManagedResourceAnnotations { + if _, exist := obj.GetAnnotations()[key]; exist { + return false + } + } + + return true +} diff --git a/test/e2e/overridepolicy_test.go b/test/e2e/overridepolicy_test.go index ec7b4f424956..663e4951d79e 100644 --- a/test/e2e/overridepolicy_test.go +++ b/test/e2e/overridepolicy_test.go @@ -17,9 +17,13 @@ limitations under the License. package e2e import ( + "fmt" + "strings" + "github.com/onsi/ginkgo/v2" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/klog/v2" @@ -74,20 +78,20 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { }, policyv1alpha1.Overriders{ LabelsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ { - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: map[string]string{ "foo": "exist", "non-exist": "non-exist", }, }, { - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: map[string]string{ "app": "nginx", }, }, { - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: map[string]string{ "bar": "bar", }, @@ -159,20 +163,20 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { }, policyv1alpha1.Overriders{ AnnotationsOverrider: []policyv1alpha1.LabelAnnotationOverrider{ { - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: map[string]string{ "foo": "exist", "non-exist": "non-exist", }, }, { - Operator: "add", + Operator: policyv1alpha1.OverriderOpAdd, Value: map[string]string{ "app": "nginx", }, }, { - Operator: "remove", + Operator: policyv1alpha1.OverriderOpRemove, Value: map[string]string{ "bar": "bar", }, @@ -241,17 +245,17 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, { Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "busybox", }, { Component: "Tag", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "1.0", }, }, @@ -319,17 +323,17 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, { Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "busybox", }, { Component: "Tag", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "1.0", }, }, @@ -399,7 +403,7 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, @@ -425,6 +429,221 @@ var _ = ginkgo.Describe("[OverridePolicy] apply overriders testing", func() { }) }) }) + + ginkgo.Context("[FieldOverrider] apply field overrider testing to update JSON values in ConfigMap", func() { + var configMapNamespace, configMapName string + var configMap 
*corev1.ConfigMap + + ginkgo.BeforeEach(func() { + configMapNamespace = testNamespace + configMapName = configMapNamePrefix + rand.String(RandomStrLength) + propagationPolicyNamespace = testNamespace + propagationPolicyName = configMapName + overridePolicyNamespace = testNamespace + overridePolicyName = configMapName + + configMapData := map[string]string{ + "deploy.json": fmt.Sprintf(`{ + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": "nginx-deploy", + "namespace": "%s" + }, + "spec": { + "replicas": 3, + "selector": { + "matchLabels": { + "app": "nginx" + } + }, + "template": { + "metadata": { + "labels": { + "app": "nginx" + } + }, + "spec": { + "containers": [ + { + "name": "nginx", + "image": "nginx:1.19.0" + } + ] + } + } + } + }`, configMapNamespace), + } + + configMap = helper.NewConfigMap(configMapNamespace, configMapName, configMapData) + propagationPolicy = helper.NewPropagationPolicy(propagationPolicyNamespace, propagationPolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + + overridePolicy = helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/deploy.json", + JSON: []policyv1alpha1.JSONPatchOperation{ + { + SubPath: "/spec/replicas", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`5`)}, + }, + { + SubPath: "/spec/template/spec/containers/-", + Operator: policyv1alpha1.OverriderOpAdd, + Value: apiextensionsv1.JSON{Raw: []byte(`{"name": "nginx-helper", "image": "nginx:1.19.1"}`)}, + }, + { + SubPath: "/spec/template/spec/containers/0/image", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) + framework.CreateOverridePolicy(karmadaClient, overridePolicy) + framework.CreateConfigMap(kubeClient, configMap) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) + framework.RemoveOverridePolicy(karmadaClient, overridePolicy.Namespace, overridePolicy.Name) + framework.RemoveConfigMap(kubeClient, configMap.Namespace, configMap.Name) + }) + }) + + ginkgo.It("should override JSON field in ConfigMap", func() { + klog.Infof("check if configMap present on member clusters has the correct JSON field value.") + framework.WaitConfigMapPresentOnClustersFitWith(framework.ClusterNames(), configMap.Namespace, configMap.Name, + func(cm *corev1.ConfigMap) bool { + return strings.Contains(cm.Data["deploy.json"], `"replicas":5`) && + strings.Contains(cm.Data["deploy.json"], `"name":"nginx-helper"`) && + !strings.Contains(cm.Data["deploy.json"], `"image":"nginx:1.19.0"`) + }) + }) + }) + + ginkgo.Context("[FieldOverrider] apply field overrider testing to update YAML values in ConfigMap", func() { + var configMapNamespace, configMapName string + var configMap *corev1.ConfigMap + + ginkgo.BeforeEach(func() { + configMapNamespace = testNamespace + configMapName = configMapNamePrefix + 
rand.String(RandomStrLength) + propagationPolicyNamespace = testNamespace + propagationPolicyName = configMapName + overridePolicyNamespace = testNamespace + overridePolicyName = configMapName + + // Define the ConfigMap data + configMapData := map[string]string{ + "nginx.yaml": ` +server: + listen: 80 + server_name: localhost + location /: + root: /usr/share/nginx/html + index: + - index.html + - index.htm + error_page: + - code: 500 + - code: 502 + - code: 503 + - code: 504 + location /50x.html: + root: /usr/share/nginx/html +`, + } + configMap = helper.NewConfigMap(configMapNamespace, configMapName, configMapData) + propagationPolicy = helper.NewPropagationPolicy(propagationPolicyNamespace, propagationPolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + + overridePolicy = helper.NewOverridePolicy(overridePolicyNamespace, overridePolicyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: configMap.APIVersion, + Kind: configMap.Kind, + Name: configMap.Name, + }, + }, policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, policyv1alpha1.Overriders{ + FieldOverrider: []policyv1alpha1.FieldOverrider{ + { + FieldPath: "/data/nginx.yaml", + YAML: []policyv1alpha1.YAMLPatchOperation{ + { + SubPath: "/server/location ~1/root", + Operator: policyv1alpha1.OverriderOpReplace, + Value: apiextensionsv1.JSON{Raw: []byte(`"/var/www/html"`)}, + }, + { + SubPath: "/server/error_page/-", + Operator: policyv1alpha1.OverriderOpAdd, + Value: apiextensionsv1.JSON{Raw: []byte(`{"code": 400}`)}, + }, + { + SubPath: "/server/location ~1/index", + Operator: policyv1alpha1.OverriderOpRemove, + }, + }, + }, + }, + }) + }) + + ginkgo.BeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, propagationPolicy) + framework.CreateOverridePolicy(karmadaClient, overridePolicy) + framework.CreateConfigMap(kubeClient, configMap) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, propagationPolicy.Namespace, propagationPolicy.Name) + framework.RemoveOverridePolicy(karmadaClient, overridePolicy.Namespace, overridePolicy.Name) + framework.RemoveConfigMap(kubeClient, configMap.Namespace, configMap.Name) + }) + }) + + ginkgo.It("should override YAML field in ConfigMap", func() { + klog.Infof("check if configMap present on member clusters has the correct YAML field value.") + framework.WaitConfigMapPresentOnClustersFitWith(framework.ClusterNames(), configMap.Namespace, configMap.Name, + func(cm *corev1.ConfigMap) bool { + return strings.Contains(cm.Data["nginx.yaml"], "root: /var/www/html") && + strings.Contains(cm.Data["nginx.yaml"], "code: 400") && + !strings.Contains(cm.Data["nginx.yaml"], "- index.html") + }) + }) + }) + }) var _ = framework.SerialDescribe("OverridePolicy with nil resourceSelector testing", func() { @@ -464,7 +683,7 @@ var _ = framework.SerialDescribe("OverridePolicy with nil resourceSelector testi Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, @@ -538,17 +757,17 @@ var _ = ginkgo.Describe("[OverrideRules] apply overriders testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, 
Value: "fictional.registry.us", }, { Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "busybox", }, { Component: "Tag", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "1.0", }, }, @@ -621,17 +840,17 @@ var _ = ginkgo.Describe("[OverrideRules] apply overriders testing", func() { ImageOverrider: []policyv1alpha1.ImageOverrider{ { Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, { Component: "Repository", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "busybox", }, { Component: "Tag", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "1.0", }, }, @@ -706,7 +925,7 @@ var _ = ginkgo.Describe("[OverrideRules] apply overriders testing", func() { Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, @@ -777,7 +996,7 @@ var _ = framework.SerialDescribe("OverrideRules with nil resourceSelector testin Path: "/spec/template/spec/containers/0/image", }, Component: "Registry", - Operator: "replace", + Operator: policyv1alpha1.OverriderOpReplace, Value: "fictional.registry.us", }, }, diff --git a/test/e2e/propagationpolicy_test.go b/test/e2e/propagationpolicy_test.go index 4dfc479ce594..3440d8949ac7 100644 --- a/test/e2e/propagationpolicy_test.go +++ b/test/e2e/propagationpolicy_test.go @@ -53,12 +53,20 @@ import ( ) // BasicPropagation focus on basic propagation functionality testing. -var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { +var _ = ginkgo.Describe("[BasicCase] PropagationPolicy testing", func() { + var policyNamespace, policyName string + var policy *policyv1alpha1.PropagationPolicy + + ginkgo.JustBeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, policy) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) + }) + }) + ginkgo.Context("Deployment propagation testing", func() { - var policyNamespace, policyName string var deploymentNamespace, deploymentName string var deployment *appsv1.Deployment - var policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace @@ -81,10 +89,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateDeployment(kubeClient, deployment) ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name) }) @@ -114,10 +120,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.Context("Service propagation testing", func() { - var policyNamespace, policyName string var serviceNamespace, serviceName string var service *corev1.Service - var policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace @@ -140,10 +144,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateService(kubeClient, service) ginkgo.DeferCleanup(func() { - 
framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemoveService(kubeClient, service.Namespace, service.Name) framework.WaitServiceDisappearOnClusters(framework.ClusterNames(), service.Namespace, service.Name) }) @@ -166,10 +168,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.Context("Pod propagation testing", func() { - var policyNamespace, policyName string var podNamespace, podName string var pod *corev1.Pod - var policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace policyName = podNamePrefix + rand.String(RandomStrLength) @@ -191,10 +191,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreatePod(kubeClient, pod) ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemovePod(kubeClient, pod.Namespace, pod.Name) framework.WaitPodDisappearOnClusters(framework.ClusterNames(), pod.Namespace, pod.Name) }) @@ -226,7 +224,6 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { var crGVR schema.GroupVersionResource var crAPIVersion string var cr *unstructured.Unstructured - var crPolicy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { crdGroup = fmt.Sprintf("example-%s.karmada.io", rand.String(RandomStrLength)) @@ -256,7 +253,7 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { crAPIVersion = fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1") cr = testhelper.NewCustomResource(crAPIVersion, crd.Spec.Names.Kind, crNamespace, crName) - crPolicy = testhelper.NewPropagationPolicy(crNamespace, crName, []policyv1alpha1.ResourceSelector{ + policy = testhelper.NewPropagationPolicy(crNamespace, crName, []policyv1alpha1.ResourceSelector{ { APIVersion: crAPIVersion, Kind: crd.Spec.Names.Kind, @@ -272,6 +269,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { ginkgo.BeforeEach(func() { framework.CreateClusterPropagationPolicy(karmadaClient, crdPolicy) framework.CreateCRD(dynamicClient, crd) + framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(), + fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind) ginkgo.DeferCleanup(func() { framework.RemoveClusterPropagationPolicy(karmadaClient, crdPolicy.Name) framework.RemoveCRD(dynamicClient, crd.Name) @@ -279,15 +278,6 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.It("namespaceScoped cr propagation testing", func() { - framework.GetCRD(dynamicClient, crd.Name) - framework.WaitCRDPresentOnClusters(karmadaClient, framework.ClusterNames(), - fmt.Sprintf("%s/%s", crd.Spec.Group, "v1alpha1"), crd.Spec.Names.Kind) - - framework.CreatePropagationPolicy(karmadaClient, crPolicy) - ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, crPolicy.Namespace, crPolicy.Name) - }) - ginkgo.By(fmt.Sprintf("creating cr(%s/%s)", crNamespace, crName), func() { _, err := dynamicClient.Resource(crGVR).Namespace(crNamespace).Create(context.TODO(), cr, metav1.CreateOptions{}) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) @@ -383,10 +373,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.Context("Job propagation testing", func() { - var policyNamespace, policyName string var jobNamespace, jobName string var job *batchv1.Job - var 
policy *policyv1alpha1.PropagationPolicy ginkgo.BeforeEach(func() { policyNamespace = testNamespace @@ -409,10 +397,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateJob(kubeClient, job) ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemoveJob(kubeClient, job.Namespace, job.Name) framework.WaitJobDisappearOnClusters(framework.ClusterNames(), job.Namespace, job.Name) }) @@ -439,8 +425,6 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { ginkgo.Context("Role propagation testing", func() { var ( roleNamespace, roleName string - policyName string - policy *policyv1alpha1.PropagationPolicy role *rbacv1.Role ) @@ -464,10 +448,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateRole(kubeClient, role) ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemoveRole(kubeClient, role.Namespace, role.Name) framework.WaitRoleDisappearOnClusters(framework.ClusterNames(), role.Namespace, role.Name) }) @@ -484,8 +466,6 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { ginkgo.Context("RoleBinding propagation testing", func() { var ( roleBindingNamespace, roleBindingName string - policyName string - policy *policyv1alpha1.PropagationPolicy roleBinding *rbacv1.RoleBinding ) @@ -509,10 +489,8 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) ginkgo.BeforeEach(func() { - framework.CreatePropagationPolicy(karmadaClient, policy) framework.CreateRoleBinding(kubeClient, roleBinding) ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) framework.RemoveRoleBinding(kubeClient, roleBinding.Namespace, roleBinding.Name) framework.WaitRoleBindingDisappearOnClusters(framework.ClusterNames(), roleBinding.Namespace, roleBinding.Name) }) @@ -527,9 +505,67 @@ var _ = ginkgo.Describe("[BasicPropagation] propagation testing", func() { }) }) +var _ = ginkgo.Describe("[CornerCase] PropagationPolicy testing", func() { + var policyNamespace, policyName string + var policy *policyv1alpha1.PropagationPolicy + + ginkgo.JustBeforeEach(func() { + framework.CreatePropagationPolicy(karmadaClient, policy) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) + }) + }) + + ginkgo.Context("Propagate Deployment with long pp name (exceed 63 character)", func() { + var deploymentNamespace, deploymentName string + var deployment *appsv1.Deployment + + ginkgo.BeforeEach(func() { + policyNamespace = testNamespace + policyName = deploymentNamePrefix + "-longname-longname-longname-longname-longname-longname-" + rand.String(RandomStrLength) + deploymentNamespace = testNamespace + deploymentName = policyName + + deployment = testhelper.NewDeployment(deploymentNamespace, deploymentName) + policy = testhelper.NewPropagationPolicy(policyNamespace, policyName, []policyv1alpha1.ResourceSelector{ + { + APIVersion: deployment.APIVersion, + Kind: deployment.Kind, + Name: deployment.Name, + }, + }, policyv1alpha1.Placement{ + ClusterAffinity: &policyv1alpha1.ClusterAffinity{ + ClusterNames: framework.ClusterNames(), + }, + }) + }) + + ginkgo.BeforeEach(func() 
{ + framework.CreateDeployment(kubeClient, deployment) + ginkgo.DeferCleanup(func() { + framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + framework.WaitDeploymentDisappearOnClusters(framework.ClusterNames(), deployment.Namespace, deployment.Name) + }) + }) + + ginkgo.It("deployment propagation testing", func() { + framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name, + func(*appsv1.Deployment) bool { + return true + }) + + framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas) + framework.WaitDeploymentPresentOnClustersFitWith(framework.ClusterNames(), deployment.Namespace, deployment.Name, + func(deployment *appsv1.Deployment) bool { + return *deployment.Spec.Replicas == updateDeploymentReplicas + }) + }) + }) +}) + // ImplicitPriority more than one PP matches the object, we should choose the most suitable one. // Set it to run sequentially to avoid affecting other test cases. -var _ = framework.SerialDescribe("[ImplicitPriority] propagation testing", func() { +var _ = framework.SerialDescribe("[ImplicitPriority] PropagationPolicy testing", func() { ginkgo.Context("priorityMatchName propagation testing", func() { var policyNamespace, priorityMatchName, priorityMatchLabelSelector, priorityMatchAll string var deploymentNamespace, deploymentName string @@ -621,7 +657,7 @@ var _ = framework.SerialDescribe("[ImplicitPriority] propagation testing", func( // ExplicitPriority more than one PP matches the object, we should select the one with the highest explicit priority, if the // explicit priority is same, select the one with the highest implicit priority. -var _ = ginkgo.Describe("[ExplicitPriority] propagation testing", func() { +var _ = ginkgo.Describe("[ExplicitPriority] PropagationPolicy testing", func() { ginkgo.Context("high explicit/low priority/implicit priority PropagationPolicy propagation testing", func() { var policyNamespace, higherPriorityLabelSelector, lowerPriorityMatchName, implicitPriorityMatchName string var deploymentNamespace, deploymentName string @@ -781,7 +817,7 @@ var _ = ginkgo.Describe("[ExplicitPriority] propagation testing", func() { }) // AdvancedPropagation focus on some advanced propagation testing. 
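One design note on the hoisted `JustBeforeEach` in the [BasicCase] and [CornerCase] blocks above: it is safe because Ginkgo runs every `BeforeEach` (outermost first) before any `JustBeforeEach`, so each `Context` has already constructed `policy` by the time it gets created. A skeleton of the pattern, with an illustrative policy body:

```go
var _ = ginkgo.Describe("JustBeforeEach ordering sketch", func() {
	var policy *policyv1alpha1.PropagationPolicy

	// Runs after every BeforeEach below, regardless of nesting depth.
	ginkgo.JustBeforeEach(func() {
		framework.CreatePropagationPolicy(karmadaClient, policy)
		ginkgo.DeferCleanup(func() {
			framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name)
		})
	})

	ginkgo.Context("some workload", func() {
		// Runs first: builds the policy this context wants created.
		ginkgo.BeforeEach(func() {
			policy = testhelper.NewPropagationPolicy(testNamespace, "demo", nil, policyv1alpha1.Placement{
				ClusterAffinity: &policyv1alpha1.ClusterAffinity{ClusterNames: framework.ClusterNames()},
			})
		})

		ginkgo.It("creates the policy before the spec body runs", func() {})
	})
})
```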
-var _ = ginkgo.Describe("[AdvancedPropagation] propagation testing", func() { +var _ = ginkgo.Describe("[AdvancedCase] PropagationPolicy testing", func() { ginkgo.Context("Edit PropagationPolicy ResourceSelectors", func() { var policy *policyv1alpha1.PropagationPolicy var deployment01, deployment02 *appsv1.Deployment @@ -1125,7 +1161,7 @@ var _ = ginkgo.Describe("[AdvancedPropagation] propagation testing", func() { }) }) -var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() { +var _ = ginkgo.Describe("[Suspension] PropagationPolicy testing", func() { var policy *policyv1alpha1.PropagationPolicy var deployment *appsv1.Deployment var targetMember string @@ -1149,36 +1185,33 @@ var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() { ginkgo.BeforeEach(func() { framework.CreatePropagationPolicy(karmadaClient, policy) - ginkgo.DeferCleanup(func() { - framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) - }) framework.CreateDeployment(kubeClient, deployment) framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name, func(*appsv1.Deployment) bool { return true }) + ginkgo.DeferCleanup(func() { + framework.RemovePropagationPolicy(karmadaClient, policy.Namespace, policy.Name) + framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + }) }) - ginkgo.BeforeEach(func() { - policy.Spec.Suspension = &policyv1alpha1.Suspension{ - Dispatching: ptr.To(true), - } - framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec) - }) - - ginkgo.Context("suspend the PropagationPolicy dispatching", func() { - ginkgo.AfterEach(func() { - framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + ginkgo.It("suspend the PP dispatching", func() { + ginkgo.By("update the pp suspension dispatching to true", func() { + policy.Spec.Suspension = &policyv1alpha1.Suspension{ + Dispatching: ptr.To(true), + } + framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec) }) - ginkgo.It("suspends ResourceBinding", func() { + ginkgo.By("check RB suspension spec", func() { framework.WaitResourceBindingFitWith(karmadaClient, deployment.Namespace, names.GenerateBindingName(deployment.Kind, deployment.Name), func(binding *workv1alpha2.ResourceBinding) bool { return binding.Spec.Suspension != nil && ptr.Deref(binding.Spec.Suspension.Dispatching, false) }) }) - ginkgo.It("suspends Work", func() { + ginkgo.By("check Work suspension spec", func() { workName := names.GenerateWorkName(deployment.Kind, deployment.Name, deployment.Namespace) esName := names.GenerateExecutionSpaceName(targetMember) gomega.Eventually(func() bool { @@ -1190,7 +1223,7 @@ var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() { }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) - ginkgo.It("adds suspend dispatching condition to Work", func() { + ginkgo.By("check Work Dispatching status condition", func() { workName := names.GenerateWorkName(deployment.Kind, deployment.Name, deployment.Namespace) esName := names.GenerateExecutionSpaceName(targetMember) gomega.Eventually(func() bool { @@ -1202,7 +1235,7 @@ var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() { }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) - ginkgo.It("adds dispatching event with suspend message", func() { + ginkgo.By("check dispatching event", func() { workName := names.GenerateWorkName(deployment.Kind, deployment.Name, 
deployment.Namespace) esName := names.GenerateExecutionSpaceName(targetMember) framework.WaitEventFitWith(kubeClient, esName, workName, @@ -1213,34 +1246,21 @@ var _ = ginkgo.Describe("[Suspend] PropagationPolicy testing", func() { }) }) - ginkgo.Context("update resource in the control plane", func() { - ginkgo.JustBeforeEach(func() { + ginkgo.It("suspension resume", func() { + ginkgo.By("update deployment replicas", func() { framework.UpdateDeploymentReplicas(kubeClient, deployment, updateDeploymentReplicas) }) - ginkgo.AfterEach(func() { - framework.RemoveDeployment(kubeClient, deployment.Namespace, deployment.Name) + ginkgo.By("resume the propagationPolicy", func() { + policy.Spec.Suspension = &policyv1alpha1.Suspension{} + framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec) }) - ginkgo.It("suspends updating deployment replicas in member cluster", func() { + ginkgo.By("check deployment replicas", func() { framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name, func(d *appsv1.Deployment) bool { - return *d.Spec.Replicas != updateDeploymentReplicas + return *d.Spec.Replicas == updateDeploymentReplicas }) }) - - ginkgo.When("propagation is resumed", func() { - ginkgo.JustBeforeEach(func() { - policy.Spec.Suspension = &policyv1alpha1.Suspension{} - framework.UpdatePropagationPolicyWithSpec(karmadaClient, policy.Namespace, policy.Name, policy.Spec) - }) - - ginkgo.It("updates deployment replicas in member cluster", func() { - framework.WaitDeploymentPresentOnClusterFitWith(targetMember, deployment.Namespace, deployment.Name, - func(d *appsv1.Deployment) bool { - return *d.Spec.Replicas == updateDeploymentReplicas - }) - }) - }) }) }) diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore index cd11be965309..fe79e3adda29 100644 --- a/vendor/github.com/BurntSushi/toml/.gitignore +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -1,2 +1,2 @@ -toml.test +/toml.test /toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index f621b01196cb..000000000000 --- a/vendor/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1 +0,0 @@ -Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md index cc13f8667fbd..639e6c39983d 100644 --- a/vendor/github.com/BurntSushi/toml/README.md +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -1,6 +1,5 @@ TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. +reflection interface similar to Go's standard library `json` and `xml` packages. Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). @@ -10,7 +9,7 @@ See the [releases page](https://github.com/BurntSushi/toml/releases) for a changelog; this information is also in the git tag annotations (e.g. `git show v0.4.0`). -This library requires Go 1.13 or newer; install it with: +This library requires Go 1.18 or newer; add it to your go.mod with: % go get github.com/BurntSushi/toml@latest @@ -19,16 +18,7 @@ It also comes with a TOML validator CLI tool: % go install github.com/BurntSushi/toml/cmd/tomlv@latest % tomlv some-toml-file.toml -### Testing -This package passes all tests in [toml-test] for both the decoder and the -encoder. 
- -[toml-test]: https://github.com/BurntSushi/toml-test - ### Examples -This package works similar to how the Go standard library handles XML and JSON. -Namely, data is loaded into Go values via reflection. - For the simplest example, consider some TOML file as just a list of keys and values: @@ -40,7 +30,7 @@ Perfection = [ 6, 28, 496, 8128 ] DOB = 1987-07-05T05:45:00Z ``` -Which could be defined in Go as: +Which can be decoded with: ```go type Config struct { @@ -48,20 +38,15 @@ type Config struct { Cats []string Pi float64 Perfection []int - DOB time.Time // requires `import time` + DOB time.Time } -``` - -And then decoded with: -```go var conf Config -err := toml.Decode(tomlData, &conf) -// handle error +_, err := toml.Decode(tomlData, &conf) ``` -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: +You can also use struct tags if your struct field name doesn't map to a TOML key +value directly: ```toml some_key_NAME = "wat" @@ -73,139 +58,63 @@ type TOML struct { } ``` -Beware that like other most other decoders **only exported fields** are -considered when encoding and decoding; private fields are silently ignored. +Beware that like other decoders **only exported fields** are considered when +encoding and decoding; private fields are silently ignored. ### Using the `Marshaler` and `encoding.TextUnmarshaler` interfaces -Here's an example that automatically parses duration strings into -`time.Duration` values: +Here's an example that automatically parses values in a `mail.Address`: ```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} +contacts = [ + "Donald Duck ", + "Scrooge McDuck ", +] ``` -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: +Can be decoded with: ```go -type duration struct { - time.Duration +// Create address type which satisfies the encoding.TextUnmarshaler interface. +type address struct { + *mail.Address } -func (d *duration) UnmarshalText(text []byte) error { +func (a *address) UnmarshalText(text []byte) error { var err error - d.Duration, err = time.ParseDuration(string(text)) + a.Address, err = mail.ParseAddress(string(text)) return err } + +// Decode it. +func decode() { + blob := ` + contacts = [ + "Donald Duck ", + "Scrooge McDuck ", + ] + ` + + var contacts struct { + Contacts []address + } + + _, err := toml.Decode(blob, &contacts) + if err != nil { + log.Fatal(err) + } + + for _, c := range contacts.Contacts { + fmt.Printf("%#v\n", c.Address) + } + + // Output: + // &mail.Address{Name:"Donald Duck", Address:"donald@duckburg.com"} + // &mail.Address{Name:"Scrooge McDuck", Address:"scrooge@duckburg.com"} +} ``` To target TOML specifically you can implement `UnmarshalTOML` TOML interface in a similar way. ### More complex usage -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? 
- -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_example/example.{go,toml}`. +See the [`_example/`](/_example) directory for a more complex example. diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go index e24f0c5d5c04..7aaf462c94a4 100644 --- a/vendor/github.com/BurntSushi/toml/decode.go +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -1,32 +1,66 @@ package toml import ( + "bytes" "encoding" + "encoding/json" "fmt" "io" - "io/ioutil" + "io/fs" "math" "os" "reflect" + "strconv" "strings" + "time" ) // Unmarshaler is the interface implemented by objects that can unmarshal a // TOML description of themselves. type Unmarshaler interface { - UnmarshalTOML(interface{}) error + UnmarshalTOML(any) error } -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) +// Unmarshal decodes the contents of data in TOML format into a pointer v. +// +// See [Decoder] for a description of the decoding process. +func Unmarshal(data []byte, v any) error { + _, err := NewDecoder(bytes.NewReader(data)).Decode(v) return err } +// Decode the TOML data in to the pointer v. +// +// See [Decoder] for a description of the decoding process. +func Decode(data string, v any) (MetaData, error) { + return NewDecoder(strings.NewReader(data)).Decode(v) +} + +// DecodeFile reads the contents of a file and decodes it with [Decode]. +func DecodeFile(path string, v any) (MetaData, error) { + fp, err := os.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + +// DecodeFS reads the contents of a file from [fs.FS] and decodes it with +// [Decode]. +func DecodeFS(fsys fs.FS, path string, v any) (MetaData, error) { + fp, err := fsys.Open(path) + if err != nil { + return MetaData{}, err + } + defer fp.Close() + return NewDecoder(fp).Decode(v) +} + // Primitive is a TOML value that hasn't been decoded into a Go value. // // This type can be used for any value, which will cause decoding to be delayed. -// You can use the PrimitiveDecode() function to "manually" decode these values. +// You can use [PrimitiveDecode] to "manually" decode these values. // // NOTE: The underlying representation of a `Primitive` value is subject to // change. Do not rely on it. 
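The hunk above moves `Decode` and `DecodeFile` next to `Unmarshal` and adds `DecodeFS`. A short, hedged usage sketch of the new file-system variant; the embedded file name and config struct are invented for illustration:

```go
package main

import (
	"embed"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

//go:embed config.toml
var configFS embed.FS // assumes a config.toml next to this file

type config struct {
	Age  int
	Cats []string
}

func main() {
	var c config
	// DecodeFS opens the path from any fs.FS implementation, such as embed.FS.
	if _, err := toml.DecodeFS(configFS, "config.toml", &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}
```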
@@ -35,43 +69,29 @@ func Unmarshal(p []byte, v interface{}) error {
 // overhead of reflection. They can be useful when you don't know the exact type
 // of TOML data until runtime.
 type Primitive struct {
-	undecoded interface{}
+	undecoded any
 	context   Key
 }
 
 // The significand precision for float32 and float64 is 24 and 53 bits; this is
 // the range a natural number can be stored in a float without loss of data.
 const (
-	maxSafeFloat32Int = 16777215                // 2^24-1
-	maxSafeFloat64Int = 9007199254740991        // 2^53-1
+	maxSafeFloat32Int = 16777215                // 2^24-1
+	maxSafeFloat64Int = int64(9007199254740991) // 2^53-1
 )
 
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
-	md.context = primValue.context
-	defer func() { md.context = nil }()
-	return md.unify(primValue.undecoded, rvalue(v))
-}
-
 // Decoder decodes TOML data.
 //
-// TOML tables correspond to Go structs or maps (dealer's choice – they can be
-// used interchangeably).
+// TOML tables correspond to Go structs or maps; they can be used
+// interchangeably, but structs offer better type safety.
 //
 // TOML table arrays correspond to either a slice of structs or a slice of maps.
 //
-// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed
-// in the local timezone.
+// TOML datetimes correspond to [time.Time]. Local datetimes are parsed in the
+// local timezone.
+//
+// [time.Duration] types are treated as nanoseconds if the TOML value is an
+// integer, or they're parsed with time.ParseDuration() if they're strings.
 //
 // All other TOML types (float, string, int, bool and array) correspond to the
 // obvious Go types.
@@ -80,9 +100,9 @@ func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
 // interface, in which case any primitive TOML value (floats, strings, integers,
 // booleans, datetimes) will be converted to a []byte and given to the value's
 // UnmarshalText method. See the Unmarshaler example for a demonstration with
-// time duration strings.
+// email addresses.
 //
-// Key mapping
+// # Key mapping
 //
 // TOML keys can map to either keys in a Go map or field names in a Go struct.
 // The special `toml` struct tag can be used to map TOML keys to struct fields
@@ -109,10 +129,11 @@ func NewDecoder(r io.Reader) *Decoder {
 var (
 	unmarshalToml = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
 	unmarshalText = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+	primitiveType = reflect.TypeOf((*Primitive)(nil)).Elem()
 )
 
 // Decode TOML data in to the pointer `v`.
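The `[time.Duration]` rule documented in the Decoder comment above is new in this version. A small sketch (not part of the diff; keys and field names are illustrative) showing both accepted forms:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Poll    time.Duration
		Timeout time.Duration
	}
	// Integers are taken as nanoseconds; strings go through time.ParseDuration.
	_, err := toml.Decode(`
poll    = "5m10s"     # string form
timeout = 1500000000  # integer form: nanoseconds (1.5s)
`, &cfg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Poll, cfg.Timeout) // 5m10s 1.5s
}
```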
-func (dec *Decoder) Decode(v interface{}) (MetaData, error) { +func (dec *Decoder) Decode(v any) (MetaData, error) { rv := reflect.ValueOf(v) if rv.Kind() != reflect.Ptr { s := "%q" @@ -120,25 +141,25 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { s = "%v" } - return MetaData{}, e("cannot decode to non-pointer "+s, reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to non-pointer "+s, reflect.TypeOf(v)) } if rv.IsNil() { - return MetaData{}, e("cannot decode to nil value of %q", reflect.TypeOf(v)) + return MetaData{}, fmt.Errorf("toml: cannot decode to nil value of %q", reflect.TypeOf(v)) } - // Check if this is a supported type: struct, map, interface{}, or something - // that implements UnmarshalTOML or UnmarshalText. + // Check if this is a supported type: struct, map, any, or something that + // implements UnmarshalTOML or UnmarshalText. rv = indirect(rv) rt := rv.Type() if rv.Kind() != reflect.Struct && rv.Kind() != reflect.Map && !(rv.Kind() == reflect.Interface && rv.NumMethod() == 0) && !rt.Implements(unmarshalToml) && !rt.Implements(unmarshalText) { - return MetaData{}, e("cannot decode to type %s", rt) + return MetaData{}, fmt.Errorf("toml: cannot decode to type %s", rt) } // TODO: parser should read from io.Reader? Or at the very least, make it // read from []byte rather than string - data, err := ioutil.ReadAll(dec.r) + data, err := io.ReadAll(dec.r) if err != nil { return MetaData{}, err } @@ -150,30 +171,29 @@ func (dec *Decoder) Decode(v interface{}) (MetaData, error) { md := MetaData{ mapping: p.mapping, - types: p.types, + keyInfo: p.keyInfo, keys: p.ordered, decoded: make(map[string]struct{}, len(p.ordered)), context: nil, + data: data, } return md, md.unify(p.mapping, rv) } -// Decode the TOML data in to the pointer v. +// PrimitiveDecode is just like the other Decode* functions, except it decodes a +// TOML value that has already been parsed. Valid primitive values can *only* be +// obtained from values filled by the decoder functions, including this method. +// (i.e., v may contain more [Primitive] values.) // -// See the documentation on Decoder for a description of the decoding process. -func Decode(data string, v interface{}) (MetaData, error) { - return NewDecoder(strings.NewReader(data)).Decode(v) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at path and decode it for you. -func DecodeFile(path string, v interface{}) (MetaData, error) { - fp, err := os.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) +// Meta data for primitive values is included in the meta data returned by the +// Decode* functions with one exception: keys returned by the Undecoded method +// will only reflect keys that were decoded. Namely, any keys hidden behind a +// Primitive will be considered undecoded. Executing this method will update the +// undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v any) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) } // unify performs a sort of type unification based on the structure of `rv`, @@ -181,10 +201,10 @@ func DecodeFile(path string, v interface{}) (MetaData, error) { // // Any type mismatch produces an error. Finding a type that we don't know // how to handle produces an unsupported type error. 
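For reference, a sketch of the delayed decoding that the relocated `PrimitiveDecode` documentation above describes; the `server` struct and TOML keys are made up for illustration and are not part of the diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type server struct {
	IP string `toml:"ip"`
}

func main() {
	var d struct {
		Servers map[string]toml.Primitive
	}
	md, err := toml.Decode(`
[servers.alpha]
ip = "10.0.0.1"
[servers.beta]
ip = "10.0.0.2"
`, &d)
	if err != nil {
		log.Fatal(err)
	}
	// The keys hidden behind each Primitive still count as undecoded here.
	for name, prim := range d.Servers {
		var s server
		if err := md.PrimitiveDecode(prim, &s); err != nil { // decode on demand
			log.Fatal(err)
		}
		fmt.Println(name, s.IP)
	}
}
```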
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error { +func (md *MetaData) unify(data any, rv reflect.Value) error { // Special case. Look for a `Primitive` value. // TODO: #76 would make this superfluous after implemented. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { + if rv.Type() == primitiveType { // Save the undecoded data and the key context into the primitive // value. context := make(Key, len(md.context)) @@ -196,17 +216,18 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { return nil } - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) + rvi := rv.Interface() + if v, ok := rvi.(Unmarshaler); ok { + err := v.UnmarshalTOML(data) + if err != nil { + return md.parseErr(err) } + return nil } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + if v, ok := rvi.(encoding.TextUnmarshaler); ok { return md.unifyText(data, v) } + // TODO: // The behavior here is incorrect whenever a Go type satisfies the // encoding.TextUnmarshaler interface but also corresponds to a TOML hash or @@ -217,19 +238,10 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { k := rv.Kind() - // laziness if k >= reflect.Int && k <= reflect.Uint64 { return md.unifyInt(data, rv) } switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil case reflect.Struct: return md.unifyStruct(data, rv) case reflect.Map: @@ -243,25 +255,23 @@ func (md *MetaData) unify(data interface{}, rv reflect.Value) error { case reflect.Bool: return md.unifyBool(data, rv) case reflect.Interface: - // we only support empty interfaces. - if rv.NumMethod() > 0 { - return e("unsupported type %s", rv.Type()) + if rv.NumMethod() > 0 { /// Only empty interfaces are supported. 
+ return md.e("unsupported type %s", rv.Type()) } return md.unifyAnything(data, rv) case reflect.Float32, reflect.Float64: return md.unifyFloat64(data, rv) } - return e("unsupported type %s", rv.Kind()) + return md.e("unsupported type %s", rv.Kind()) } -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) +func (md *MetaData) unifyStruct(mapping any, rv reflect.Value) error { + tmap, ok := mapping.(map[string]any) if !ok { if mapping == nil { return nil } - return e("type mismatch for %s: expected table but found %T", - rv.Type().String(), mapping) + return md.e("type mismatch for %s: expected table but found %s", rv.Type().String(), fmtType(mapping)) } for key, datum := range tmap { @@ -286,27 +296,28 @@ func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { if isUnifiable(subv) { md.decoded[md.context.add(key).String()] = struct{}{} md.context = append(md.context, key) + err := md.unify(datum, subv) if err != nil { return err } md.context = md.context[0 : len(md.context)-1] } else if f.name != "" { - return e("cannot write unexported field %s.%s", rv.Type().String(), f.name) + return md.e("cannot write unexported field %s.%s", rv.Type().String(), f.name) } } } return nil } -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - if k := rv.Type().Key().Kind(); k != reflect.String { - return fmt.Errorf( - "toml: cannot decode to a map with non-string key type (%s in %q)", - k, rv.Type()) +func (md *MetaData) unifyMap(mapping any, rv reflect.Value) error { + keyType := rv.Type().Key().Kind() + if keyType != reflect.String && keyType != reflect.Interface { + return fmt.Errorf("toml: cannot decode to a map with non-string key type (%s in %q)", + keyType, rv.Type()) } - tmap, ok := mapping.(map[string]interface{}) + tmap, ok := mapping.(map[string]any) if !ok { if tmap == nil { return nil @@ -321,19 +332,28 @@ func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { md.context = append(md.context, k) rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { + + err := md.unify(v, indirect(rvval)) + if err != nil { return err } md.context = md.context[0 : len(md.context)-1] rvkey := indirect(reflect.New(rv.Type().Key())) - rvkey.SetString(k) + + switch keyType { + case reflect.Interface: + rvkey.Set(reflect.ValueOf(k)) + case reflect.String: + rvkey.SetString(k) + } + rv.SetMapIndex(rvkey, rvval) } return nil } -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyArray(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -342,12 +362,12 @@ func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { return md.badtype("slice", data) } if l := datav.Len(); l != rv.Len() { - return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + return md.e("expected array length %d; got TOML array of length %d", rv.Len(), l) } return md.unifySliceArray(datav, rv) } -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifySlice(data any, rv reflect.Value) error { datav := reflect.ValueOf(data) if datav.Kind() != reflect.Slice { if !datav.IsValid() { @@ -374,7 +394,19 @@ func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { return nil } -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { +func 
(md *MetaData) unifyString(data any, rv reflect.Value) error { + _, ok := rv.Interface().(json.Number) + if ok { + if i, ok := data.(int64); ok { + rv.SetString(strconv.FormatInt(i, 10)) + } else if f, ok := data.(float64); ok { + rv.SetString(strconv.FormatFloat(f, 'f', -1, 64)) + } else { + return md.badtype("string", data) + } + return nil + } + if s, ok := data.(string); ok { rv.SetString(s) return nil @@ -382,12 +414,14 @@ func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { return md.badtype("string", data) } -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyFloat64(data any, rv reflect.Value) error { + rvk := rv.Kind() + if num, ok := data.(float64); ok { - switch rv.Kind() { + switch rvk { case reflect.Float32: if num < -math.MaxFloat32 || num > math.MaxFloat32 { - return e("value %f is out of range for float32", num) + return md.parseErr(errParseRange{i: num, size: rvk.String()}) } fallthrough case reflect.Float64: @@ -399,74 +433,61 @@ func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { } if num, ok := data.(int64); ok { - switch rv.Kind() { - case reflect.Float32: - if num < -maxSafeFloat32Int || num > maxSafeFloat32Int { - return e("value %d is out of range for float32", num) - } - fallthrough - case reflect.Float64: - if num < -maxSafeFloat64Int || num > maxSafeFloat64Int { - return e("value %d is out of range for float64", num) - } - rv.SetFloat(float64(num)) - default: - panic("bug") + if (rvk == reflect.Float32 && (num < -maxSafeFloat32Int || num > maxSafeFloat32Int)) || + (rvk == reflect.Float64 && (num < -maxSafeFloat64Int || num > maxSafeFloat64Int)) { + return md.parseErr(errUnsafeFloat{i: num, size: rvk.String()}) } + rv.SetFloat(float64(num)) return nil } return md.badtype("float", data) } -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("value %d is out of range for int8", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("value %d is out of range for int16", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("value %d is out of range for int32", num) - } +func (md *MetaData) unifyInt(data any, rv reflect.Value) error { + _, ok := rv.Interface().(time.Duration) + if ok { + // Parse as string duration, and fall back to regular integer parsing + // (as nanosecond) if this is not a string. + if s, ok := data.(string); ok { + dur, err := time.ParseDuration(s) + if err != nil { + return md.parseErr(errParseDuration{s}) } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. 
- case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("value %d is out of range for uint8", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("value %d is out of range for uint16", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("value %d is out of range for uint32", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") + rv.SetInt(int64(dur)) + return nil } - return nil } - return md.badtype("integer", data) + + num, ok := data.(int64) + if !ok { + return md.badtype("integer", data) + } + + rvk := rv.Kind() + switch { + case rvk >= reflect.Int && rvk <= reflect.Int64: + if (rvk == reflect.Int8 && (num < math.MinInt8 || num > math.MaxInt8)) || + (rvk == reflect.Int16 && (num < math.MinInt16 || num > math.MaxInt16)) || + (rvk == reflect.Int32 && (num < math.MinInt32 || num > math.MaxInt32)) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetInt(num) + case rvk >= reflect.Uint && rvk <= reflect.Uint64: + unum := uint64(num) + if rvk == reflect.Uint8 && (num < 0 || unum > math.MaxUint8) || + rvk == reflect.Uint16 && (num < 0 || unum > math.MaxUint16) || + rvk == reflect.Uint32 && (num < 0 || unum > math.MaxUint32) { + return md.parseErr(errParseRange{i: num, size: rvk.String()}) + } + rv.SetUint(unum) + default: + panic("unreachable") + } + return nil } -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyBool(data any, rv reflect.Value) error { if b, ok := data.(bool); ok { rv.SetBool(b) return nil @@ -474,12 +495,12 @@ func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { return md.badtype("boolean", data) } -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { +func (md *MetaData) unifyAnything(data any, rv reflect.Value) error { rv.Set(reflect.ValueOf(data)) return nil } -func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { +func (md *MetaData) unifyText(data any, v encoding.TextUnmarshaler) error { var s string switch sdata := data.(type) { case Marshaler: @@ -488,7 +509,7 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return err } s = string(text) - case TextMarshaler: + case encoding.TextMarshaler: text, err := sdata.MarshalText() if err != nil { return err @@ -508,17 +529,40 @@ func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) erro return md.badtype("primitive (string-like)", data) } if err := v.UnmarshalText([]byte(s)); err != nil { - return err + return md.parseErr(err) } return nil } -func (md *MetaData) badtype(dst string, data interface{}) error { - return e("incompatible types: TOML key %q has type %T; destination has type %s", md.context, data, dst) +func (md *MetaData) badtype(dst string, data any) error { + return md.e("incompatible types: TOML value has type %s; destination has type %s", fmtType(data), dst) +} + +func (md *MetaData) parseErr(err error) error { + k := md.context.String() + return ParseError{ + LastKey: k, + Position: md.keyInfo[k].pos, + Line: md.keyInfo[k].pos.Line, + err: err, + input: string(md.data), + } +} + +func (md *MetaData) e(format string, args ...any) error { + f := "toml: " + if len(md.context) > 0 { + f = fmt.Sprintf("toml: (last key %q): ", md.context) + p := md.keyInfo[md.context.String()].pos + if p.Line > 0 { + f = fmt.Sprintf("toml: line %d (last key %q): ", p.Line, md.context) + } + } + return fmt.Errorf(f+format, args...) 
} // rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { +func rvalue(v any) reflect.Value { return indirect(reflect.ValueOf(v)) } @@ -533,7 +577,11 @@ func indirect(v reflect.Value) reflect.Value { if v.Kind() != reflect.Ptr { if v.CanSet() { pv := v.Addr() - if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + pvi := pv.Interface() + if _, ok := pvi.(encoding.TextUnmarshaler); ok { + return pv + } + if _, ok := pvi.(Unmarshaler); ok { return pv } } @@ -549,12 +597,17 @@ func isUnifiable(rv reflect.Value) bool { if rv.CanSet() { return true } - if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + rvi := rv.Interface() + if _, ok := rvi.(encoding.TextUnmarshaler); ok { + return true + } + if _, ok := rvi.(Unmarshaler); ok { return true } return false } -func e(format string, args ...interface{}) error { - return fmt.Errorf("toml: "+format, args...) +// fmt %T with "interface {}" replaced with "any", which is far more readable. +func fmtType(t any) string { + return strings.ReplaceAll(fmt.Sprintf("%T", t), "interface {}", "any") } diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go deleted file mode 100644 index eddfb641b862..000000000000 --- a/vendor/github.com/BurntSushi/toml/decode_go116.go +++ /dev/null @@ -1,19 +0,0 @@ -//go:build go1.16 -// +build go1.16 - -package toml - -import ( - "io/fs" -) - -// DecodeFS is just like Decode, except it will automatically read the contents -// of the file at `path` from a fs.FS instance. -func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) { - fp, err := fsys.Open(path) - if err != nil { - return MetaData{}, err - } - defer fp.Close() - return NewDecoder(fp).Decode(v) -} diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go index c6af3f239ddf..155709a80b88 100644 --- a/vendor/github.com/BurntSushi/toml/deprecated.go +++ b/vendor/github.com/BurntSushi/toml/deprecated.go @@ -5,17 +5,25 @@ import ( "io" ) +// TextMarshaler is an alias for encoding.TextMarshaler. +// // Deprecated: use encoding.TextMarshaler type TextMarshaler encoding.TextMarshaler +// TextUnmarshaler is an alias for encoding.TextUnmarshaler. +// // Deprecated: use encoding.TextUnmarshaler type TextUnmarshaler encoding.TextUnmarshaler +// DecodeReader is an alias for NewDecoder(r).Decode(v). +// +// Deprecated: use NewDecoder(reader).Decode(&value). +func DecodeReader(r io.Reader, v any) (MetaData, error) { return NewDecoder(r).Decode(v) } + +// PrimitiveDecode is an alias for MetaData.PrimitiveDecode(). +// // Deprecated: use MetaData.PrimitiveDecode. -func PrimitiveDecode(primValue Primitive, v interface{}) error { +func PrimitiveDecode(primValue Primitive, v any) error { md := MetaData{decoded: make(map[string]struct{})} return md.unify(primValue.undecoded, rvalue(v)) } - -// Deprecated: use NewDecoder(reader).Decode(&value). -func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { return NewDecoder(r).Decode(v) } diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go index 099c4a77d2d3..82c90a905701 100644 --- a/vendor/github.com/BurntSushi/toml/doc.go +++ b/vendor/github.com/BurntSushi/toml/doc.go @@ -1,13 +1,8 @@ -/* -Package toml implements decoding and encoding of TOML files. 
- -This package supports TOML v1.0.0, as listed on https://toml.io - -There is also support for delaying decoding with the Primitive type, and -querying the set of keys in a TOML document with the MetaData type. - -The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, -and can be used to verify if TOML document is valid. It can also be used to -print the type of each key. -*/ +// Package toml implements decoding and encoding of TOML files. +// +// This package supports TOML v1.0.0, as specified at https://toml.io +// +// The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator, +// and can be used to verify if TOML document is valid. It can also be used to +// print the type of each key. package toml diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go index dee4e6d31961..73366c0d9a98 100644 --- a/vendor/github.com/BurntSushi/toml/encode.go +++ b/vendor/github.com/BurntSushi/toml/encode.go @@ -2,7 +2,9 @@ package toml import ( "bufio" + "bytes" "encoding" + "encoding/json" "errors" "fmt" "io" @@ -63,18 +65,38 @@ var dblQuotedReplacer = strings.NewReplacer( "\x7f", `\u007f`, ) +var ( + marshalToml = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalText = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + timeType = reflect.TypeOf((*time.Time)(nil)).Elem() +) + // Marshaler is the interface implemented by types that can marshal themselves // into valid TOML. type Marshaler interface { MarshalTOML() ([]byte, error) } +// Marshal returns a TOML representation of the Go value. +// +// See [Encoder] for a description of the encoding process. +func Marshal(v any) ([]byte, error) { + buff := new(bytes.Buffer) + if err := NewEncoder(buff).Encode(v); err != nil { + return nil, err + } + return buff.Bytes(), nil +} + // Encoder encodes a Go to a TOML document. // // The mapping between Go values and TOML values should be precisely the same as -// for the Decode* functions. +// for [Decode]. +// +// time.Time is encoded as a RFC 3339 string, and time.Duration as its string +// representation. // -// The toml.Marshaler and encoder.TextMarshaler interfaces are supported to +// The [Marshaler] and [encoding.TextMarshaler] interfaces are supported to // encoding the value as custom TOML. // // If you want to write arbitrary binary data then you will need to use @@ -85,6 +107,17 @@ type Marshaler interface { // // Go maps will be sorted alphabetically by key for deterministic output. // +// The toml struct tag can be used to provide the key name; if omitted the +// struct field name will be used. If the "omitempty" option is present the +// following value will be skipped: +// +// - arrays, slices, maps, and string with len of 0 +// - struct with all zero values +// - bool false +// +// If omitzero is given all int and float types with a value of 0 will be +// skipped. +// // Encoding Go values without a corresponding TOML representation will return an // error. Examples of this includes maps with non-string keys, slices with nil // elements, embedded non-struct types, and nested slices containing maps or @@ -94,28 +127,24 @@ type Marshaler interface { // NOTE: only exported keys are encoded due to the use of reflection. Unexported // keys are silently discarded. type Encoder struct { - // String to use for a single indentation level; default is two spaces. - Indent string - + Indent string // string for a single indentation level; default is two spaces. + hasWritten bool // written any output to w yet? 
w *bufio.Writer - hasWritten bool // written any output to w yet? } // NewEncoder create a new Encoder. func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } + return &Encoder{w: bufio.NewWriter(w), Indent: " "} } -// Encode writes a TOML representation of the Go value to the Encoder's writer. +// Encode writes a TOML representation of the Go value to the [Encoder]'s writer. // // An error is returned if the value given cannot be encoded to a valid TOML // document. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { + err := enc.safeEncode(Key([]string{}), rv) + if err != nil { return err } return enc.w.Flush() @@ -136,18 +165,15 @@ func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { } func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case: time needs to be in ISO8601 format. - // - // Special case: if we can marshal the type to text, then we used that. This - // prevents the encoder for handling these types as generic structs (or - // whatever the underlying type of a TextMarshaler is). - switch t := rv.Interface().(type) { - case time.Time, encoding.TextMarshaler, Marshaler: + // If we can marshal the type to text, then we use that. This prevents the + // encoder for handling these types as generic structs (or whatever the + // underlying type of a TextMarshaler is). + switch { + case isMarshaler(rv): enc.writeKeyValue(key, rv, false) return - // TODO: #76 would make this superfluous after implemented. - case Primitive: - enc.encode(key, reflect.ValueOf(t.undecoded)) + case rv.Type() == primitiveType: // TODO: #76 would make this superfluous after implemented. + enc.encode(key, reflect.ValueOf(rv.Interface().(Primitive).undecoded)) return } @@ -212,18 +238,44 @@ func (enc *Encoder) eElement(rv reflect.Value) { if err != nil { encPanic(err) } - enc.writeQuoted(string(s)) + if s == nil { + encPanic(errors.New("MarshalTOML returned nil and no error")) + } + enc.w.Write(s) return case encoding.TextMarshaler: s, err := v.MarshalText() if err != nil { encPanic(err) } + if s == nil { + encPanic(errors.New("MarshalText returned nil and no error")) + } enc.writeQuoted(string(s)) return + case time.Duration: + enc.writeQuoted(v.String()) + return + case json.Number: + n, _ := rv.Interface().(json.Number) + + if n == "" { /// Useful zero value. 
+ enc.w.WriteByte('0') + return + } else if v, err := n.Int64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } else if v, err := n.Float64(); err == nil { + enc.eElement(reflect.ValueOf(v)) + return + } + encPanic(fmt.Errorf("unable to convert %q to int64 or float64", n)) } switch rv.Kind() { + case reflect.Ptr: + enc.eElement(rv.Elem()) + return case reflect.String: enc.writeQuoted(rv.String()) case reflect.Bool: @@ -235,18 +287,30 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Float32: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) } case reflect.Float64: f := rv.Float() if math.IsNaN(f) { + if math.Signbit(f) { + enc.wf("-") + } enc.wf("nan") } else if math.IsInf(f, 0) { - enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + if math.Signbit(f) { + enc.wf("-") + } + enc.wf("inf") } else { enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) } @@ -259,7 +323,7 @@ func (enc *Encoder) eElement(rv reflect.Value) { case reflect.Interface: enc.eElement(rv.Elem()) default: - encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + encPanic(fmt.Errorf("unexpected type: %s", fmtType(rv.Interface()))) } } @@ -280,7 +344,7 @@ func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { length := rv.Len() enc.wf("[") for i := 0; i < length; i++ { - elem := rv.Index(i) + elem := eindirect(rv.Index(i)) enc.eElement(elem) if i != length-1 { enc.wf(", ") @@ -294,7 +358,7 @@ func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { encPanic(errNoKey) } for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) + trv := eindirect(rv.Index(i)) if isNil(trv) { continue } @@ -319,7 +383,7 @@ func (enc *Encoder) eTable(key Key, rv reflect.Value) { } func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { - switch rv := eindirect(rv); rv.Kind() { + switch rv.Kind() { case reflect.Map: enc.eMap(key, rv, inline) case reflect.Struct: @@ -341,7 +405,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var mapKeysDirect, mapKeysSub []string for _, mapKey := range rv.MapKeys() { k := mapKey.String() - if typeIsTable(tomlTypeOfGo(rv.MapIndex(mapKey))) { + if typeIsTable(tomlTypeOfGo(eindirect(rv.MapIndex(mapKey)))) { mapKeysSub = append(mapKeysSub, k) } else { mapKeysDirect = append(mapKeysDirect, k) @@ -351,7 +415,7 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { var writeMapKeys = func(mapKeys []string, trailC bool) { sort.Strings(mapKeys) for i, mapKey := range mapKeys { - val := rv.MapIndex(reflect.ValueOf(mapKey)) + val := eindirect(rv.MapIndex(reflect.ValueOf(mapKey))) if isNil(val) { continue } @@ -379,6 +443,13 @@ func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { const is32Bit = (32 << (^uint(0) >> 63)) == 32 +func pointerTo(t reflect.Type) reflect.Type { + if t.Kind() == reflect.Ptr { + return pointerTo(t.Elem()) + } + return t +} + func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { // Write keys for fields directly under this key first, because if we write // a field that creates a new table then all keys under it will be in that @@ -395,48 +466,42 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { addFields = func(rt reflect.Type, rv reflect.Value, 
start []int) { for i := 0; i < rt.NumField(); i++ { f := rt.Field(i) - if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + isEmbed := f.Anonymous && pointerTo(f.Type).Kind() == reflect.Struct + if f.PkgPath != "" && !isEmbed { /// Skip unexported fields. + continue + } + opts := getOptions(f.Tag) + if opts.skip { continue } - frv := rv.Field(i) + frv := eindirect(rv.Field(i)) + + if is32Bit { + // Copy so it works correct on 32bit archs; not clear why this + // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 + // This also works fine on 64bit, but 32bit archs are somewhat + // rare and this is a wee bit faster. + copyStart := make([]int, len(start)) + copy(copyStart, start) + start = copyStart + } // Treat anonymous struct fields with tag names as though they are // not anonymous, like encoding/json does. // // Non-struct anonymous fields use the normal encoding logic. - if f.Anonymous { - t := f.Type - switch t.Kind() { - case reflect.Struct: - if getOptions(f.Tag).name == "" { - addFields(t, frv, append(start, f.Index...)) - continue - } - case reflect.Ptr: - if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { - if !frv.IsNil() { - addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) - } - continue - } + if isEmbed { + if getOptions(f.Tag).name == "" && frv.Kind() == reflect.Struct { + addFields(frv.Type(), frv, append(start, f.Index...)) + continue } } if typeIsTable(tomlTypeOfGo(frv)) { fieldsSub = append(fieldsSub, append(start, f.Index...)) } else { - // Copy so it works correct on 32bit archs; not clear why this - // is needed. See #314, and https://www.reddit.com/r/golang/comments/pnx8v4 - // This also works fine on 64bit, but 32bit archs are somewhat - // rare and this is a wee bit faster. - if is32Bit { - copyStart := make([]int, len(start)) - copy(copyStart, start) - fieldsDirect = append(fieldsDirect, append(copyStart, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) } } } @@ -447,21 +512,25 @@ func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { fieldType := rt.FieldByIndex(fieldIndex) fieldVal := rv.FieldByIndex(fieldIndex) - if isNil(fieldVal) { /// Don't write anything for nil fields. + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + if opts.omitempty && isEmpty(fieldVal) { continue } - opts := getOptions(fieldType.Tag) - if opts.skip { + fieldVal = eindirect(fieldVal) + + if isNil(fieldVal) { /// Don't write anything for nil fields. 
continue } + keyName := fieldType.Name if opts.name != "" { keyName = opts.name } - if opts.omitempty && isEmpty(fieldVal) { - continue - } + if opts.omitzero && isZero(fieldVal) { continue } @@ -498,6 +567,21 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { if isNil(rv) || !rv.IsValid() { return nil } + + if rv.Kind() == reflect.Struct { + if rv.Type() == timeType { + return tomlDatetime + } + if isMarshaler(rv) { + return tomlString + } + return tomlHash + } + + if isMarshaler(rv) { + return tomlString + } + switch rv.Kind() { case reflect.Bool: return tomlBool @@ -509,7 +593,7 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { case reflect.Float32, reflect.Float64: return tomlFloat case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { + if isTableArray(rv) { return tomlArrayHash } return tomlArray @@ -519,67 +603,35 @@ func tomlTypeOfGo(rv reflect.Value) tomlType { return tomlString case reflect.Map: return tomlHash - case reflect.Struct: - if _, ok := rv.Interface().(time.Time); ok { - return tomlDatetime - } - if isMarshaler(rv) { - return tomlString - } - return tomlHash default: - if isMarshaler(rv) { - return tomlString - } - encPanic(errors.New("unsupported type: " + rv.Kind().String())) panic("unreachable") } } func isMarshaler(rv reflect.Value) bool { - switch rv.Interface().(type) { - case encoding.TextMarshaler: - return true - case Marshaler: - return true - } - - // Someone used a pointer receiver: we can make it work for pointer values. - if rv.CanAddr() { - if _, ok := rv.Addr().Interface().(encoding.TextMarshaler); ok { - return true - } - if _, ok := rv.Addr().Interface().(Marshaler); ok { - return true - } - } - return false + return rv.Type().Implements(marshalText) || rv.Type().Implements(marshalToml) } -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). -func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil +// isTableArray reports if all entries in the array or slice are a table. +func isTableArray(arr reflect.Value) bool { + if isNil(arr) || !arr.IsValid() || arr.Len() == 0 { + return false } - /// Don't allow nil. - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - if tomlTypeOfGo(rv.Index(i)) == nil { + ret := true + for i := 0; i < arr.Len(); i++ { + tt := tomlTypeOfGo(eindirect(arr.Index(i))) + // Don't allow nil. 
+		if tt == nil {
 			encPanic(errArrayNilElement)
 		}
-	}
 
-	firstType := tomlTypeOfGo(rv.Index(0))
-	if firstType == nil {
-		encPanic(errArrayNilElement)
+		if ret && !typeEqual(tomlHash, tt) {
+			ret = false
+		}
 	}
-	return firstType
+	return ret
 }
 
 type tagOptions struct {
@@ -624,8 +676,26 @@ func isEmpty(rv reflect.Value) bool {
 	switch rv.Kind() {
 	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
 		return rv.Len() == 0
+	case reflect.Struct:
+		if rv.Type().Comparable() {
+			return reflect.Zero(rv.Type()).Interface() == rv.Interface()
+		}
+		// Need to also check if all the fields are empty, otherwise something
+		// like this with uncomparable types will always return true:
+		//
+		//   type a struct{ field b }
+		//   type b struct{ s []string }
+		//   s := a{field: b{s: []string{"AAA"}}}
+		for i := 0; i < rv.NumField(); i++ {
+			if !isEmpty(rv.Field(i)) {
+				return false
+			}
+		}
+		return true
 	case reflect.Bool:
 		return !rv.Bool()
+	case reflect.Ptr:
+		return rv.IsNil()
 	}
 	return false
 }
@@ -638,19 +708,21 @@ func (enc *Encoder) newline() {
 
 // Write a key/value pair:
 //
-//     key = <value>
+//	key = <value>
 //
 // This is also used for "k = v" in inline tables; so something like this will
 // be written in three calls:
 //
-//	┌────────────────────┐
-//	│      ┌───┐  ┌─────┐│
-//	v      v   v  v     vv
-//	key = {k = v, k2 = v2}
-//
+//	┌───────────────────┐
+//	│      ┌───┐  ┌────┐│
+//	v      v   v  v    vv
+//	key = {k = 1, k2 = 2}
 func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	/// Marshaler used on top-level document; call eElement() to just call
+	/// Marshal{TOML,Text}.
 	if len(key) == 0 {
-		encPanic(errNoKey)
+		enc.eElement(val)
+		return
 	}
 	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
 	enc.eElement(val)
@@ -659,7 +731,7 @@ func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
 	}
 }
 
-func (enc *Encoder) wf(format string, v ...interface{}) {
+func (enc *Encoder) wf(format string, v ...any) {
 	_, err := fmt.Fprintf(enc.w, format, v...)
 	if err != nil {
 		encPanic(err)
@@ -675,13 +747,25 @@ func encPanic(err error) {
 	panic(tomlEncodeError{err})
 }
 
+// Resolve any level of pointers to the actual value (e.g. **string → string).
 func eindirect(v reflect.Value) reflect.Value {
-	switch v.Kind() {
-	case reflect.Ptr, reflect.Interface:
-		return eindirect(v.Elem())
-	default:
+	if v.Kind() != reflect.Ptr && v.Kind() != reflect.Interface {
+		if isMarshaler(v) {
+			return v
+		}
+		if v.CanAddr() { /// Special case for marshalers; see #358.
+			if pv := v.Addr(); isMarshaler(pv) {
+				return pv
+			}
+		}
 		return v
 	}
+
+	if v.IsNil() {
+		return v
+	}
+
+	return eindirect(v.Elem())
 }
 
 func isNil(rv reflect.Value) bool {
diff --git a/vendor/github.com/BurntSushi/toml/error.go b/vendor/github.com/BurntSushi/toml/error.go
index 36edc46554ed..b45a3f45f68a 100644
--- a/vendor/github.com/BurntSushi/toml/error.go
+++ b/vendor/github.com/BurntSushi/toml/error.go
@@ -5,57 +5,60 @@ import (
 	"strings"
 )
 
-// ParseError is returned when there is an error parsing the TOML syntax.
-//
-// For example invalid syntax, duplicate keys, etc.
+// ParseError is returned when there is an error parsing the TOML syntax such as
+// invalid syntax, duplicate keys, etc.
// // In addition to the error message itself, you can also print detailed location -// information with context by using ErrorWithLocation(): +// information with context by using [ErrorWithPosition]: // -// toml: error: Key 'fruit' was already created and cannot be used as an array. +// toml: error: Key 'fruit' was already created and cannot be used as an array. // -// At line 4, column 2-7: +// At line 4, column 2-7: // -// 2 | fruit = [] -// 3 | -// 4 | [[fruit]] # Not allowed -// ^^^^^ +// 2 | fruit = [] +// 3 | +// 4 | [[fruit]] # Not allowed +// ^^^^^ // -// Furthermore, the ErrorWithUsage() can be used to print the above with some -// more detailed usage guidance: +// [ErrorWithUsage] can be used to print the above with some more detailed usage +// guidance: // -// toml: error: newlines not allowed within inline tables +// toml: error: newlines not allowed within inline tables // -// At line 1, column 18: +// At line 1, column 18: // -// 1 | x = [{ key = 42 # -// ^ +// 1 | x = [{ key = 42 # +// ^ // -// Error help: +// Error help: // -// Inline tables must always be on a single line: +// Inline tables must always be on a single line: // -// table = {key = 42, second = 43} +// table = {key = 42, second = 43} // -// It is invalid to split them over multiple lines like so: +// It is invalid to split them over multiple lines like so: // -// # INVALID -// table = { -// key = 42, -// second = 43 -// } +// # INVALID +// table = { +// key = 42, +// second = 43 +// } // -// Use regular for this: +// Use regular for this: // -// [table] -// key = 42 -// second = 43 +// [table] +// key = 42 +// second = 43 type ParseError struct { Message string // Short technical message. Usage string // Longer message with usage guidance; may be blank. Position Position // Position of the error LastKey string // Last parsed key, may be blank. - Line int // Line the error occurred. Deprecated: use Position. + + // Line the error occurred. + // + // Deprecated: use [Position]. + Line int err error input string @@ -81,9 +84,9 @@ func (pe ParseError) Error() string { pe.Position.Line, pe.LastKey, msg) } -// ErrorWithUsage() returns the error with detailed location context. +// ErrorWithPosition returns the error with detailed location context. // -// See the documentation on ParseError. +// See the documentation on [ParseError]. func (pe ParseError) ErrorWithPosition() string { if pe.input == "" { // Should never happen, but just in case. return pe.Error() @@ -111,26 +114,39 @@ func (pe ParseError) ErrorWithPosition() string { msg, pe.Position.Line, col, col+pe.Position.Len) } if pe.Position.Line > 2 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, lines[pe.Position.Line-3]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-2, expandTab(lines[pe.Position.Line-3])) } if pe.Position.Line > 1 { - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, lines[pe.Position.Line-2]) + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line-1, expandTab(lines[pe.Position.Line-2])) } - fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, lines[pe.Position.Line-1]) - fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col), strings.Repeat("^", pe.Position.Len)) + + /// Expand tabs, so that the ^^^s are at the correct position, but leave + /// "column 10-13" intact. Adjusting this to the visual column would be + /// better, but we don't know the tabsize of the user in their editor, which + /// can be 8, 4, 2, or something else. We can't know. So leaving it as the + /// character index is probably the "most correct". 
+ expanded := expandTab(lines[pe.Position.Line-1]) + diff := len(expanded) - len(lines[pe.Position.Line-1]) + + fmt.Fprintf(b, "% 7d | %s\n", pe.Position.Line, expanded) + fmt.Fprintf(b, "% 10s%s%s\n", "", strings.Repeat(" ", col+diff), strings.Repeat("^", pe.Position.Len)) return b.String() } -// ErrorWithUsage() returns the error with detailed location context and usage +// ErrorWithUsage returns the error with detailed location context and usage // guidance. // -// See the documentation on ParseError. +// See the documentation on [ParseError]. func (pe ParseError) ErrorWithUsage() string { m := pe.ErrorWithPosition() if u, ok := pe.err.(interface{ Usage() string }); ok && u.Usage() != "" { - return m + "Error help:\n\n " + - strings.ReplaceAll(strings.TrimSpace(u.Usage()), "\n", "\n ") + - "\n" + lines := strings.Split(strings.TrimSpace(u.Usage()), "\n") + for i := range lines { + if lines[i] != "" { + lines[i] = " " + lines[i] + } + } + return m + "Error help:\n\n" + strings.Join(lines, "\n") + "\n" } return m } @@ -152,14 +168,49 @@ func (pe ParseError) column(lines []string) int { return col } +func expandTab(s string) string { + var ( + b strings.Builder + l int + fill = func(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = ' ' + } + return string(b) + } + ) + b.Grow(len(s)) + for _, r := range s { + switch r { + case '\t': + tw := 8 - l%8 + b.WriteString(fill(tw)) + l += tw + default: + b.WriteRune(r) + l += 1 + } + } + return b.String() +} + type ( errLexControl struct{ r rune } errLexEscape struct{ r rune } errLexUTF8 struct{ b byte } - errLexInvalidNum struct{ v string } - errLexInvalidDate struct{ v string } + errParseDate struct{ v string } errLexInlineTableNL struct{} errLexStringNL struct{} + errParseRange struct { + i any // int or float + size string // "int64", "uint16", etc. 
+	}
+	errUnsafeFloat struct {
+		i    interface{} // float32 or float64
+		size string      // "float32" or "float64"
+	}
+	errParseDuration struct{ d string }
 )
 
 func (e errLexControl) Error() string {
@@ -171,14 +222,20 @@ func (e errLexEscape) Error() string { return fmt.Sprintf(`invalid escape
 func (e errLexEscape) Usage() string  { return usageEscape }
 func (e errLexUTF8) Error() string    { return fmt.Sprintf("invalid UTF-8 byte: 0x%02x", e.b) }
 func (e errLexUTF8) Usage() string    { return "" }
-func (e errLexInvalidNum) Error() string  { return fmt.Sprintf("invalid number: %q", e.v) }
-func (e errLexInvalidNum) Usage() string  { return "" }
-func (e errLexInvalidDate) Error() string { return fmt.Sprintf("invalid date: %q", e.v) }
-func (e errLexInvalidDate) Usage() string { return "" }
+func (e errParseDate) Error() string  { return fmt.Sprintf("invalid datetime: %q", e.v) }
+func (e errParseDate) Usage() string  { return usageDate }
 func (e errLexInlineTableNL) Error() string { return "newlines not allowed within inline tables" }
 func (e errLexInlineTableNL) Usage() string { return usageInlineNewline }
 func (e errLexStringNL) Error() string { return "strings cannot contain newlines" }
 func (e errLexStringNL) Usage() string { return usageStringNewline }
+func (e errParseRange) Error() string { return fmt.Sprintf("%v is out of range for %s", e.i, e.size) }
+func (e errParseRange) Usage() string { return usageIntOverflow }
+func (e errUnsafeFloat) Error() string {
+	return fmt.Sprintf("%v is out of the safe %s range", e.i, e.size)
+}
+func (e errUnsafeFloat) Usage() string   { return usageUnsafeFloat }
+func (e errParseDuration) Error() string { return fmt.Sprintf("invalid duration: %q", e.d) }
+func (e errParseDuration) Usage() string { return usageDuration }
 
 const usageEscape = `
 A '\' inside a "-delimited string is interpreted as an escape character.
@@ -227,3 +284,73 @@ Instead use """ or ''' to split strings over multiple lines:
 
     string = """Hello,
     world!"""
 `
+
+const usageIntOverflow = `
+This number is too large; this may be an error in the TOML, but it can also be a
+bug in the program that uses too small of an integer.
+
+The maximum and minimum values are:
+
+    size   │ lowest         │ highest
+    ───────┼────────────────┼───────────────
+    int8   │ -128           │ 127
+    int16  │ -32,768        │ 32,767
+    int32  │ -2,147,483,648 │ 2,147,483,647
+    int64  │ -9.2 × 10¹⁷    │ 9.2 × 10¹⁷
+    uint8  │ 0              │ 255
+    uint16 │ 0              │ 65,535
+    uint32 │ 0              │ 4,294,967,295
+    uint64 │ 0              │ 1.8 × 10¹⁸
+
+int refers to int32 on 32-bit systems and int64 on 64-bit systems.
+`
+
+const usageUnsafeFloat = `
+This number is outside of the "safe" range for floating point numbers; whole
+(non-fractional) numbers outside the below range can not always be represented
+accurately in a float, leading to some loss of accuracy.
+
+Explicitly mark a number as a fractional unit by adding ".0", which will incur
+some loss of accuracy; for example:
+
+	f = 2_000_000_000.0
+
+Accuracy ranges:
+
+	float32 =            16,777,215
+	float64 = 9,007,199,254,740,991
+`
+
+const usageDuration = `
+A duration must be as "number<unit>", without any spaces. Valid units are:
+
+	ns   nanoseconds (billionth of a second)
+	us, µs   microseconds (millionth of a second)
+	ms   milliseconds (thousands of a second)
+	s    seconds
+	m    minutes
+	h    hours
+
+You can combine multiple units; for example "5m10s" for 5 minutes and 10
+seconds.
+` + +const usageDate = ` +A TOML datetime must be in one of the following formats: + + 2006-01-02T15:04:05Z07:00 Date and time, with timezone. + 2006-01-02T15:04:05 Date and time, but without timezone. + 2006-01-02 Date without a time or timezone. + 15:04:05 Just a time, without any timezone. + +Seconds may optionally have a fraction, up to nanosecond precision: + + 15:04:05.123 + 15:04:05.856018510 +` + +// TOML 1.1: +// The seconds part in times is optional, and may be omitted: +// 2006-01-02T15:04Z07:00 +// 2006-01-02T15:04 +// 15:04 diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go index 63ef20f47453..a1016d98a8ec 100644 --- a/vendor/github.com/BurntSushi/toml/lex.go +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -17,6 +17,7 @@ const ( itemEOF itemText itemString + itemStringEsc itemRawString itemMultilineString itemRawMultilineString @@ -46,12 +47,14 @@ func (p Position) String() string { } type lexer struct { - input string - start int - pos int - line int - state stateFn - items chan item + input string + start int + pos int + line int + state stateFn + items chan item + tomlNext bool + esc bool // Allow for backing up up to 4 runes. This is necessary because TOML // contains 3-rune tokens (""" and '''). @@ -82,18 +85,19 @@ func (lx *lexer) nextItem() item { return item default: lx.state = lx.state(lx) - //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + //fmt.Printf(" STATE %-24s current: %-10s stack: %s\n", lx.state, lx.current(), lx.stack) } } } -func lex(input string) *lexer { +func lex(input string, tomlNext bool) *lexer { lx := &lexer{ - input: input, - state: lexTop, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - line: 1, + input: input, + state: lexTop, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + line: 1, + tomlNext: tomlNext, } return lx } @@ -128,6 +132,11 @@ func (lx lexer) getPos() Position { } func (lx *lexer) emit(typ itemType) { + // Needed for multiline strings ending with an incomplete UTF-8 sequence. + if lx.start > lx.pos { + lx.error(errLexUTF8{lx.input[lx.pos]}) + return + } lx.items <- item{typ: typ, pos: lx.getPos(), val: lx.current()} lx.start = lx.pos } @@ -157,7 +166,7 @@ func (lx *lexer) next() (r rune) { } r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) - if r == utf8.RuneError { + if r == utf8.RuneError && w == 1 { lx.error(errLexUTF8{lx.input[lx.pos]}) return utf8.RuneError } @@ -263,7 +272,7 @@ func (lx *lexer) errorPos(start, length int, err error) stateFn { } // errorf is like error, and creates a new error. -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { +func (lx *lexer) errorf(format string, values ...any) stateFn { if lx.atEOF { pos := lx.getPos() pos.Line-- @@ -326,9 +335,7 @@ func lexTopEnd(lx *lexer) stateFn { lx.emit(itemEOF) return nil } - return lx.errorf( - "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", - r) + return lx.errorf("expected a top-level item to end with a newline, comment, or EOF, but got %q instead", r) } // lexTable lexes the beginning of a table. Namely, it makes sure that @@ -403,7 +410,7 @@ func lexTableNameEnd(lx *lexer) stateFn { // Lexes only one part, e.g. only 'a' inside 'a.b'. 
func lexBareName(lx *lexer) stateFn { r := lx.next() - if isBareKeyChar(r) { + if isBareKeyChar(r, lx.tomlNext) { return lexBareName } lx.backup() @@ -613,6 +620,9 @@ func lexInlineTableValue(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValue) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValue) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValue) @@ -635,6 +645,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { case isWhitespace(r): return lexSkip(lx, lexInlineTableValueEnd) case isNL(r): + if lx.tomlNext { + return lexSkip(lx, lexInlineTableValueEnd) + } return lx.errorPrevLine(errLexInlineTableNL{}) case r == '#': lx.push(lexInlineTableValueEnd) @@ -643,6 +656,9 @@ func lexInlineTableValueEnd(lx *lexer) stateFn { lx.ignore() lx.skip(isWhitespace) if lx.peek() == '}' { + if lx.tomlNext { + return lexInlineTableValueEnd + } return lx.errorf("trailing comma not allowed in inline tables") } return lexInlineTableValue @@ -682,7 +698,12 @@ func lexString(lx *lexer) stateFn { return lexStringEscape case r == '"': lx.backup() - lx.emit(itemString) + if lx.esc { + lx.esc = false + lx.emit(itemStringEsc) + } else { + lx.emit(itemString) + } lx.next() lx.ignore() return lx.pop() @@ -711,7 +732,17 @@ func lexMultilineString(lx *lexer) stateFn { if lx.peek() == '"' { /// Check if we already lexed 5 's; if so we have 6 now, and /// that's just too many man! - if strings.HasSuffix(lx.current(), `"""""`) { + /// + /// Second check is for the edge case: + /// + /// two quotes allowed. + /// vv + /// """lol \"""""" + /// ^^ ^^^---- closing three + /// escaped + /// + /// But ugly, but it works + if strings.HasSuffix(lx.current(), `"""""`) && !strings.HasSuffix(lx.current(), `\"""""`) { return lx.errorf(`unexpected '""""""'`) } lx.backup() @@ -722,6 +753,7 @@ func lexMultilineString(lx *lexer) stateFn { lx.backup() /// backup: don't include the """ in the item. lx.backup() lx.backup() + lx.esc = false lx.emit(itemMultilineString) lx.next() /// Read over ''' again and discard it. lx.next() @@ -755,8 +787,8 @@ func lexRawString(lx *lexer) stateFn { } } -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'''" has already been consumed and +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such a +// string. It assumes that the beginning triple-' has already been consumed and // ignored. func lexMultilineRawString(lx *lexer) stateFn { r := lx.next() @@ -802,8 +834,7 @@ func lexMultilineRawString(lx *lexer) stateFn { // lexMultilineStringEscape consumes an escaped character. It assumes that the // preceding '\\' has already been consumed. func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { + if isNL(lx.next()) { /// \ escaping newline. 
return lexMultilineString } lx.backup() @@ -812,8 +843,14 @@ func lexMultilineStringEscape(lx *lexer) stateFn { } func lexStringEscape(lx *lexer) stateFn { + lx.esc = true r := lx.next() switch r { + case 'e': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + fallthrough case 'b': fallthrough case 't': @@ -832,6 +869,11 @@ func lexStringEscape(lx *lexer) stateFn { fallthrough case '\\': return lx.pop() + case 'x': + if !lx.tomlNext { + return lx.error(errLexEscape{r}) + } + return lexHexEscape case 'u': return lexShortUnicodeEscape case 'U': @@ -840,14 +882,23 @@ func lexStringEscape(lx *lexer) stateFn { return lx.error(errLexEscape{r}) } +func lexHexEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 2; i++ { + r = lx.next() + if !isHex(r) { + return lx.errorf(`expected two hexadecimal digits after '\x', but got %q instead`, lx.current()) + } + } + return lx.pop() +} + func lexShortUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 4; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected four hexadecimal digits after '\u', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected four hexadecimal digits after '\u', but got %q instead`, lx.current()) } } return lx.pop() @@ -857,10 +908,8 @@ func lexLongUnicodeEscape(lx *lexer) stateFn { var r rune for i := 0; i < 8; i++ { r = lx.next() - if !isHexadecimal(r) { - return lx.errorf( - `expected eight hexadecimal digits after '\U', but got %q instead`, - lx.current()) + if !isHex(r) { + return lx.errorf(`expected eight hexadecimal digits after '\U', but got %q instead`, lx.current()) } } return lx.pop() @@ -927,7 +976,7 @@ func lexDatetime(lx *lexer) stateFn { // lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. 
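A tiny sketch (not part of the diff) of the escape forms lexed above: `\u` takes four hex digits and `\U` takes eight, and both are always accepted, while `\x` and `\e` are only lexed when the lexer's tomlNext flag is set:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var s struct {
		K string
	}
	_, err := toml.Decode(`k = "caf\u00E9 \U0001F600"`, &s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s.K) // café 😀
}
```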
func lexHexInteger(lx *lexer) stateFn { r := lx.next() - if isHexadecimal(r) { + if isHex(r) { return lexHexInteger } switch r { @@ -1061,7 +1110,7 @@ func lexBaseNumberOrDate(lx *lexer) stateFn { return lexOctalInteger case 'x': r = lx.peek() - if !isHexadecimal(r) { + if !isHex(r) { lx.errorf("not a hexidecimal number: '%s%c'", lx.current(), r) } return lexHexInteger @@ -1159,7 +1208,7 @@ func (itype itemType) String() string { return "EOF" case itemText: return "Text" - case itemString, itemRawString, itemMultilineString, itemRawMultilineString: + case itemString, itemStringEsc, itemRawString, itemMultilineString, itemRawMultilineString: return "String" case itemBool: return "Bool" @@ -1192,7 +1241,7 @@ func (itype itemType) String() string { } func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) + return fmt.Sprintf("(%s, %s)", item.typ, item.val) } func isWhitespace(r rune) bool { return r == '\t' || r == ' ' } @@ -1208,10 +1257,23 @@ func isControl(r rune) bool { // Control characters except \t, \r, \n func isDigit(r rune) bool { return r >= '0' && r <= '9' } func isBinary(r rune) bool { return r == '0' || r == '1' } func isOctal(r rune) bool { return r >= '0' && r <= '7' } -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || (r >= 'a' && r <= 'f') || (r >= 'A' && r <= 'F') -} -func isBareKeyChar(r rune) bool { +func isHex(r rune) bool { return (r >= '0' && r <= '9') || (r|0x20 >= 'a' && r|0x20 <= 'f') } +func isBareKeyChar(r rune, tomlNext bool) bool { + if tomlNext { + return (r >= 'A' && r <= 'Z') || + (r >= 'a' && r <= 'z') || + (r >= '0' && r <= '9') || + r == '_' || r == '-' || + r == 0xb2 || r == 0xb3 || r == 0xb9 || (r >= 0xbc && r <= 0xbe) || + (r >= 0xc0 && r <= 0xd6) || (r >= 0xd8 && r <= 0xf6) || (r >= 0xf8 && r <= 0x037d) || + (r >= 0x037f && r <= 0x1fff) || + (r >= 0x200c && r <= 0x200d) || (r >= 0x203f && r <= 0x2040) || + (r >= 0x2070 && r <= 0x218f) || (r >= 0x2460 && r <= 0x24ff) || + (r >= 0x2c00 && r <= 0x2fef) || (r >= 0x3001 && r <= 0xd7ff) || + (r >= 0xf900 && r <= 0xfdcf) || (r >= 0xfdf0 && r <= 0xfffd) || + (r >= 0x10000 && r <= 0xeffff) + } + return (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || diff --git a/vendor/github.com/BurntSushi/toml/meta.go b/vendor/github.com/BurntSushi/toml/meta.go index 868619fb9750..e61453730040 100644 --- a/vendor/github.com/BurntSushi/toml/meta.go +++ b/vendor/github.com/BurntSushi/toml/meta.go @@ -12,10 +12,11 @@ import ( type MetaData struct { context Key // Used only during decoding. - mapping map[string]interface{} - types map[string]tomlType + keyInfo map[string]keyInfo + mapping map[string]any keys []Key decoded map[string]struct{} + data []byte // Input file; for errors. } // IsDefined reports if the key exists in the TOML data. @@ -30,12 +31,12 @@ func (md *MetaData) IsDefined(key ...string) bool { } var ( - hash map[string]interface{} + hash map[string]any ok bool - hashOrVal interface{} = md.mapping + hashOrVal any = md.mapping ) for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { + if hash, ok = hashOrVal.(map[string]any); !ok { return false } if hashOrVal, ok = hash[k]; !ok { @@ -50,8 +51,8 @@ func (md *MetaData) IsDefined(key ...string) bool { // Type will return the empty string if given an empty key or a key that does // not exist. Keys are case sensitive. 
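The rewritten `isBareKeyChar` above implements TOML 1.1's Unicode bare keys when `tomlNext` is set (the `meta.go` hunk continues below). Continuing the previous sketch, same package and imports; the key name is only an illustration, and the claim about the accepted range is my reading of the new table:

```go
// With BURNTSUSHI_TOML_110 set as above, non-ASCII bare keys lex cleanly;
// U+00E9 ('é') falls inside the 0xd8-0xf6 range admitted by the new table.
var m map[string]string
if _, err := toml.Decode(`café = "au lait"`, &m); err != nil {
	fmt.Println("decode failed:", err)
}
fmt.Println(m["café"]) // au lait
```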
 func (md *MetaData) Type(key ...string) string {
-	if typ, ok := md.types[Key(key).String()]; ok {
-		return typ.typeString()
+	if ki, ok := md.keyInfo[Key(key).String()]; ok {
+		return ki.tomlType.typeString()
 	}
 	return ""
 }
@@ -70,7 +71,7 @@ func (md *MetaData) Keys() []Key {
 // Undecoded returns all keys that have not been decoded in the order in which
 // they appear in the original TOML document.
 //
-// This includes keys that haven't been decoded because of a Primitive value.
+// This includes keys that haven't been decoded because of a [Primitive] value.
 // Once the Primitive value is decoded, the keys will be considered decoded.
 //
 // Also note that decoding into an empty interface will result in no decoding,
@@ -88,33 +89,60 @@ func (md *MetaData) Undecoded() []Key {
 	return undecoded
 }
 
-// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// Key represents any TOML key, including key groups. Use [MetaData.Keys] to get
 // values of this type.
 type Key []string
 
 func (k Key) String() string {
-	ss := make([]string, len(k))
-	for i := range k {
-		ss[i] = k.maybeQuoted(i)
+	// This is called quite often, so it's a bit funky to make it faster.
+	var b strings.Builder
+	b.Grow(len(k) * 25)
+outer:
+	for i, kk := range k {
+		if i > 0 {
+			b.WriteByte('.')
+		}
+		if kk == "" {
+			b.WriteString(`""`)
+		} else {
+			for _, r := range kk {
+				// "Inline" isBareKeyChar
+				if !((r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-') {
+					b.WriteByte('"')
+					b.WriteString(dblQuotedReplacer.Replace(kk))
+					b.WriteByte('"')
+					continue outer
+				}
+			}
+			b.WriteString(kk)
+		}
 	}
-	return strings.Join(ss, ".")
+	return b.String()
 }
 
 func (k Key) maybeQuoted(i int) string {
 	if k[i] == "" {
 		return `""`
 	}
-	for _, c := range k[i] {
-		if !isBareKeyChar(c) {
-			return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
+	for _, r := range k[i] {
+		if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || (r >= '0' && r <= '9') || r == '_' || r == '-' {
+			continue
 		}
+		return `"` + dblQuotedReplacer.Replace(k[i]) + `"`
 	}
 	return k[i]
 }
 
+// Like append(), but only increase the cap by 1.
 func (k Key) add(piece string) Key {
+	if cap(k) > len(k) {
+		return append(k, piece)
+	}
 	newKey := make(Key, len(k)+1)
 	copy(newKey, k)
 	newKey[len(k)] = piece
 	return newKey
 }
+
+func (k Key) parent() Key  { return k[:len(k)-1] } // all except the last piece.
+func (k Key) last() string { return k[len(k)-1] }  // last piece of this key.
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
index 8269cca17016..11ac3108be3f 100644
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -2,6 +2,8 @@ package toml
 
 import (
 	"fmt"
+	"math"
+	"os"
 	"strconv"
 	"strings"
 	"time"
@@ -15,14 +17,23 @@ type parser struct {
 	context    Key      // Full key for the current hash in scope.
 	currentKey string   // Base key name for everything except hashes.
 	pos        Position // Current position in the TOML file.
+	tomlNext   bool
 
-	ordered   []Key                  // List of keys in the order that they appear in the TOML data.
-	mapping   map[string]interface{} // Map keyname → key value.
-	types     map[string]tomlType    // Map keyname → TOML type.
-	implicits map[string]struct{}    // Record implicit keys (e.g. "key.group.names").
+	ordered []Key // List of keys in the order that they appear in the TOML data.
+
+	keyInfo   map[string]keyInfo // Map keyname → info about the TOML key.
+	mapping   map[string]any     // Map keyname → key value.
+	implicits map[string]struct{} // Record implicit keys (e.g. "key.group.names").
+}
+
+type keyInfo struct {
+	pos      Position
+	tomlType tomlType
 }
 
 func parse(data string) (p *parser, err error) {
+	_, tomlNext := os.LookupEnv("BURNTSUSHI_TOML_110")
+
 	defer func() {
 		if r := recover(); r != nil {
 			if pErr, ok := r.(ParseError); ok {
@@ -35,9 +46,13 @@
 	}()
 
 	// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
-	// which mangles stuff.
-	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") {
+	// which mangles stuff. UTF-16 BOM isn't strictly valid, but some tools add
+	// it anyway.
+	if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { // UTF-16
 		data = data[2:]
+		//lint:ignore S1017 https://github.com/dominikh/go-tools/issues/1447
+	} else if strings.HasPrefix(data, "\xef\xbb\xbf") { // UTF-8
+		data = data[3:]
 	}
 
 	// Examine first few bytes for NULL bytes; this probably means it's a UTF-16
@@ -57,11 +72,12 @@
 	}
 
 	p = &parser{
-		mapping:   make(map[string]interface{}),
-		types:     make(map[string]tomlType),
-		lx:        lex(data),
+		keyInfo:   make(map[string]keyInfo),
+		mapping:   make(map[string]any),
+		lx:        lex(data, tomlNext),
 		ordered:   make([]Key, 0),
 		implicits: make(map[string]struct{}),
+		tomlNext:  tomlNext,
 	}
 	for {
 		item := p.next()
@@ -74,7 +90,16 @@
 	return p, nil
 }
 
-func (p *parser) panicItemf(it item, format string, v ...interface{}) {
+func (p *parser) panicErr(it item, err error) {
+	panic(ParseError{
+		err:      err,
+		Position: it.pos,
+		Line:     it.pos.Len,
+		LastKey:  p.current(),
+	})
+}
+
+func (p *parser) panicItemf(it item, format string, v ...any) {
 	panic(ParseError{
 		Message:  fmt.Sprintf(format, v...),
 		Position: it.pos,
@@ -83,7 +108,7 @@
 	})
 }
 
-func (p *parser) panicf(format string, v ...interface{}) {
+func (p *parser) panicf(format string, v ...any) {
 	panic(ParseError{
 		Message:  fmt.Sprintf(format, v...),
 		Position: p.pos,
@@ -94,7 +119,7 @@
 func (p *parser) next() item {
 	it := p.lx.nextItem()
-	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val)
+	//fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.pos.Line, it.val)
 	if it.typ == itemError {
 		if it.err != nil {
 			panic(ParseError{
@@ -116,7 +141,7 @@ func (p *parser) nextPos() item {
 	return it
 }
 
-func (p *parser) bug(format string, v ...interface{}) {
+func (p *parser) bug(format string, v ...any) {
 	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
 }
 
@@ -146,7 +171,7 @@ func (p *parser) topLevel(item item) {
 		p.assertEqual(itemTableEnd, name.typ)
 
 		p.addContext(key, false)
-		p.setType("", tomlHash)
+		p.setType("", tomlHash, item.pos)
 		p.ordered = append(p.ordered, key)
 	case itemArrayTableStart: // [[ .. ]]
 		name := p.nextPos()
@@ -158,7 +183,7 @@
 		p.assertEqual(itemArrayTableEnd, name.typ)
 
 		p.addContext(key, true)
-		p.setType("", tomlArrayHash)
+		p.setType("", tomlArrayHash, item.pos)
 		p.ordered = append(p.ordered, key)
 	case itemKeyStart: // key = ..
 		outerContext := p.context
@@ -171,19 +196,21 @@ func (p *parser) topLevel(item item) {
 		p.assertEqual(itemKeyEnd, k.typ)
 
 		/// The current key is the last part.
-		p.currentKey = key[len(key)-1]
+		p.currentKey = key.last()
 
 		/// All the other parts (if any) are the context; need to set each part
 		/// as implicit.
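Besides the `BURNTSUSHI_TOML_110` lookup, the `parse` hunk above now skips a UTF-8 BOM in addition to the UTF-16 ones (the `topLevel` hunk continues below). A tiny sketch, reusing the package and imports from the earlier example; no env-var toggle is involved here:

```go
// The UTF-8 BOM (\xef\xbb\xbf) is stripped unconditionally before lexing.
doc := "\xef\xbb\xbftitle = \"karmada\""
var m map[string]string
if _, err := toml.Decode(doc, &m); err != nil {
	fmt.Println("decode failed:", err)
}
fmt.Println(m["title"]) // karmada
```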
- context := key[:len(key)-1] + context := key.parent() for i := range context { p.addImplicitContext(append(p.context, context[i:i+1]...)) } + p.ordered = append(p.ordered, p.context.add(p.currentKey)) /// Set value. - val, typ := p.value(p.next(), false) - p.set(p.currentKey, val, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) + vItem := p.next() + val, typ := p.value(vItem, false) + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ, vItem.pos) /// Remove the context we added (preserving any context from [tbl] lines). p.context = outerContext @@ -198,7 +225,7 @@ func (p *parser) keyString(it item) string { switch it.typ { case itemText: return it.val - case itemString, itemMultilineString, + case itemString, itemStringEsc, itemMultilineString, itemRawString, itemRawMultilineString: s, _ := p.value(it, false) return s.(string) @@ -215,12 +242,14 @@ var datetimeRepl = strings.NewReplacer( // value translates an expected value from the lexer into a Go value wrapped // as an empty interface. -func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { +func (p *parser) value(it item, parentIsArray bool) (any, tomlType) { switch it.typ { case itemString: + return it.val, p.typeOfPrimitive(it) + case itemStringEsc: return p.replaceEscapes(it, it.val), p.typeOfPrimitive(it) case itemMultilineString: - return p.replaceEscapes(it, stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + return p.replaceEscapes(it, p.stripEscapedNewlines(stripFirstNewline(it.val))), p.typeOfPrimitive(it) case itemRawString: return it.val, p.typeOfPrimitive(it) case itemRawMultilineString: @@ -250,7 +279,7 @@ func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { panic("unreachable") } -func (p *parser) valueInteger(it item) (interface{}, tomlType) { +func (p *parser) valueInteger(it item) (any, tomlType) { if !numUnderscoresOK(it.val) { p.panicItemf(it, "Invalid integer %q: underscores must be surrounded by digits", it.val) } @@ -266,7 +295,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { // So mark the former as a bug but the latter as a legitimate user // error. if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Integer '%s' is out of the range of 64-bit signed integers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "int64"}) } else { p.bug("Expected integer value, but got '%s'.", it.val) } @@ -274,7 +303,7 @@ func (p *parser) valueInteger(it item) (interface{}, tomlType) { return num, p.typeOfPrimitive(it) } -func (p *parser) valueFloat(it item) (interface{}, tomlType) { +func (p *parser) valueFloat(it item) (any, tomlType) { parts := strings.FieldsFunc(it.val, func(r rune) bool { switch r { case '.', 'e', 'E': @@ -298,31 +327,42 @@ func (p *parser) valueFloat(it item) (interface{}, tomlType) { p.panicItemf(it, "Invalid float %q: '.' must be followed by one or more digits", it.val) } val := strings.Replace(it.val, "_", "", -1) - if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
+ signbit := false + if val == "+nan" || val == "-nan" { + signbit = val == "-nan" val = "nan" } num, err := strconv.ParseFloat(val, 64) if err != nil { if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { - p.panicItemf(it, "Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + p.panicErr(it, errParseRange{i: it.val, size: "float64"}) } else { p.panicItemf(it, "Invalid float value: %q", it.val) } } + if signbit { + num = math.Copysign(num, -1) + } return num, p.typeOfPrimitive(it) } var dtTypes = []struct { fmt string zone *time.Location + next bool }{ - {time.RFC3339Nano, time.Local}, - {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, - {"2006-01-02", internal.LocalDate}, - {"15:04:05.999999999", internal.LocalTime}, + {time.RFC3339Nano, time.Local, false}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime, false}, + {"2006-01-02", internal.LocalDate, false}, + {"15:04:05.999999999", internal.LocalTime, false}, + + // tomlNext + {"2006-01-02T15:04Z07:00", time.Local, true}, + {"2006-01-02T15:04", internal.LocalDatetime, true}, + {"15:04", internal.LocalTime, true}, } -func (p *parser) valueDatetime(it item) (interface{}, tomlType) { +func (p *parser) valueDatetime(it item) (any, tomlType) { it.val = datetimeRepl.Replace(it.val) var ( t time.Time @@ -330,29 +370,49 @@ func (p *parser) valueDatetime(it item) (interface{}, tomlType) { err error ) for _, dt := range dtTypes { + if dt.next && !p.tomlNext { + continue + } t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) if err == nil { + if missingLeadingZero(it.val, dt.fmt) { + p.panicErr(it, errParseDate{it.val}) + } ok = true break } } if !ok { - p.panicItemf(it, "Invalid TOML Datetime: %q.", it.val) + p.panicErr(it, errParseDate{it.val}) } return t, p.typeOfPrimitive(it) } -func (p *parser) valueArray(it item) (interface{}, tomlType) { - p.setType(p.currentKey, tomlArray) +// Go's time.Parse() will accept numbers without a leading zero; there isn't any +// way to require it. https://github.com/golang/go/issues/29911 +// +// Depend on the fact that the separators (- and :) should always be at the same +// location. +func missingLeadingZero(d, l string) bool { + for i, c := range []byte(l) { + if c == '.' || c == 'Z' { + return false + } + if (c < '0' || c > '9') && d[i] != c { + return true + } + } + return false +} - // p.setType(p.currentKey, typ) - var ( - types []tomlType +func (p *parser) valueArray(it item) (any, tomlType) { + p.setType(p.currentKey, tomlArray, it.pos) - // Initialize to a non-nil empty slice. This makes it consistent with - // how S = [] decodes into a non-nil slice inside something like struct - // { S []string }. See #338 - array = []interface{}{} + var ( + // Initialize to a non-nil slice to make it consistent with how S = [] + // decodes into a non-nil slice inside something like struct { S + // []string }. See #338 + array = make([]any, 0, 2) ) for it = p.next(); it.typ != itemArrayEnd; it = p.next() { if it.typ == itemCommentStart { @@ -362,20 +422,20 @@ func (p *parser) valueArray(it item) (interface{}, tomlType) { val, typ := p.value(it, true) array = append(array, val) - types = append(types, typ) - // XXX: types isn't used here, we need it to record the accurate type + // XXX: type isn't used here, we need it to record the accurate type // information. // // Not entirely sure how to best store this; could use "key[0]", // "key[1]" notation, or maybe store it on the Array type? 
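The new `dtTypes` entries and `missingLeadingZero` above change datetime parsing in two ways: seconds become optional under `tomlNext`, and a missing leading zero is rejected even though Go's `time.Parse` would accept it (the `valueArray` hunk continues below). Continuing the earlier sketch, with illustrative dates:

```go
var m map[string]any

// Seconds are optional under tomlNext ("2006-01-02T15:04" in dtTypes).
_, err := toml.Decode("t = 2024-01-02T15:04", &m)
fmt.Println(m["t"], err)

// Rejected in all modes: time.Parse would tolerate the missing leading
// zero, so missingLeadingZero backstops it with errParseDate.
_, err = toml.Decode("d = 2024-1-02", &m)
fmt.Println(err)
```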
+		_ = typ
 	}
 	return array, tomlArray
 }
 
-func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) {
+func (p *parser) valueInlineTable(it item, parentIsArray bool) (any, tomlType) {
 	var (
-		hash         = make(map[string]interface{})
+		topHash      = make(map[string]any)
 		outerContext = p.context
 		outerKey     = p.currentKey
 	)
@@ -403,19 +463,33 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
 		p.assertEqual(itemKeyEnd, k.typ)
 
 		/// The current key is the last part.
-		p.currentKey = key[len(key)-1]
+		p.currentKey = key.last()
 
 		/// All the other parts (if any) are the context; need to set each part
 		/// as implicit.
-		context := key[:len(key)-1]
+		context := key.parent()
 		for i := range context {
 			p.addImplicitContext(append(p.context, context[i:i+1]...))
 		}
+		p.ordered = append(p.ordered, p.context.add(p.currentKey))
 
 		/// Set the value.
 		val, typ := p.value(p.next(), false)
-		p.set(p.currentKey, val, typ)
-		p.ordered = append(p.ordered, p.context.add(p.currentKey))
+		p.setValue(p.currentKey, val)
+		p.setType(p.currentKey, typ, it.pos)
+
+		hash := topHash
+		for _, c := range context {
+			h, ok := hash[c]
+			if !ok {
+				h = make(map[string]any)
+				hash[c] = h
+			}
+			hash, ok = h.(map[string]any)
+			if !ok {
+				p.panicf("%q is not a table", p.context)
+			}
+		}
 		hash[p.currentKey] = val
 
 		/// Restore context.
@@ -423,7 +497,7 @@ func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tom
 	}
 	p.context = outerContext
 	p.currentKey = outerKey
-	return hash, tomlHash
+	return topHash, tomlHash
 }
 
 // numHasLeadingZero checks if this number has leading zeroes, allowing for '0',
@@ -453,9 +527,9 @@ func numUnderscoresOK(s string) bool {
 			}
 		}
 
-		// isHexadecimal is a superset of all the permissable characters
-		// surrounding an underscore.
-		accept = isHexadecimal(r)
+		// isHex is a superset of all the permissible characters surrounding an
+		// underscore.
+		accept = isHex(r)
 	}
 	return accept
 }
@@ -478,21 +552,19 @@ func numPeriodsOK(s string) bool {
 // Establishing the context also makes sure that the key isn't a duplicate, and
 // will create implicit hashes automatically.
 func (p *parser) addContext(key Key, array bool) {
-	var ok bool
-
-	// Always start at the top level and drill down for our context.
+	/// Always start at the top level and drill down for our context.
 	hashContext := p.mapping
-	keyContext := make(Key, 0)
+	keyContext := make(Key, 0, len(key)-1)
 
-	// We only need implicit hashes for key[0:-1]
-	for _, k := range key[0 : len(key)-1] {
-		_, ok = hashContext[k]
+	/// We only need implicit hashes for the parents.
+	for _, k := range key.parent() {
+		_, ok := hashContext[k]
 		keyContext = append(keyContext, k)
 
 		// No key? Make an implicit hash and move on.
 		if !ok {
 			p.addImplicit(keyContext)
-			hashContext[k] = make(map[string]interface{})
+			hashContext[k] = make(map[string]any)
 		}
 
 		// If the hash context is actually an array of tables, then set
@@ -501,9 +573,9 @@
 		// Otherwise, it better be a table, since this MUST be a key group (by
 		// virtue of it not being the last element in a key).
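The `topHash` walk added to `valueInlineTable` above makes dotted keys inside an inline table land in nested tables under the inline table itself, rather than in the outer context (the `addContext` hunk continues below). A sketch with hypothetical keys, same imports as before:

```go
// "net.host" and "net.port" end up nested under "server", built by
// walking topHash, instead of clobbering the enclosing table's context.
var m map[string]map[string]any
_, err := toml.Decode(`server = { net.host = "karmada", net.port = 443 }`, &m)
fmt.Println(m["server"]["net"], err) // map[host:karmada port:443] <nil>
```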
switch t := hashContext[k].(type) { - case []map[string]interface{}: + case []map[string]any: hashContext = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hashContext = t default: p.panicf("Key '%s' was already created as a hash.", keyContext) @@ -514,39 +586,33 @@ func (p *parser) addContext(key Key, array bool) { if array { // If this is the first element for this array, then allocate a new // list of tables for it. - k := key[len(key)-1] + k := key.last() if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 4) + hashContext[k] = make([]map[string]any, 0, 4) } // Add a new table. But make sure the key hasn't already been used // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) + if hash, ok := hashContext[k].([]map[string]any); ok { + hashContext[k] = append(hash, make(map[string]any)) } else { p.panicf("Key '%s' was already created and cannot be used as an array.", key) } } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) + p.setValue(key.last(), make(map[string]any)) } - p.context = append(p.context, key[len(key)-1]) -} - -// set calls setValue and setType. -func (p *parser) set(key string, val interface{}, typ tomlType) { - p.setValue(key, val) - p.setType(key, typ) + p.context = append(p.context, key.last()) } // setValue sets the given key to the given value in the current context. // It will make sure that the key hasn't already been defined, account for // implicit key groups. -func (p *parser) setValue(key string, value interface{}) { +func (p *parser) setValue(key string, value any) { var ( - tmpHash interface{} + tmpHash any ok bool hash = p.mapping - keyContext Key + keyContext = make(Key, 0, len(p.context)+1) ) for _, k := range p.context { keyContext = append(keyContext, k) @@ -554,11 +620,11 @@ func (p *parser) setValue(key string, value interface{}) { p.bug("Context for key '%s' has not been established.", keyContext) } switch t := tmpHash.(type) { - case []map[string]interface{}: + case []map[string]any: // The context is a table of hashes. Pick the most recent table // defined as the current hash. hash = t[len(t)-1] - case map[string]interface{}: + case map[string]any: hash = t default: p.panicf("Key '%s' has already been defined.", keyContext) @@ -585,9 +651,8 @@ func (p *parser) setValue(key string, value interface{}) { p.removeImplicit(keyContext) return } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. + // Otherwise, we have a concrete key trying to override a previous key, + // which is *always* wrong. p.panicf("Key '%s' has already been defined.", keyContext) } @@ -599,7 +664,7 @@ func (p *parser) setValue(key string, value interface{}) { // // Note that if `key` is empty, then the type given will be applied to the // current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { +func (p *parser) setType(key string, typ tomlType, pos Position) { keyContext := make(Key, 0, len(p.context)+1) keyContext = append(keyContext, p.context...) 
if len(key) > 0 { // allow type setting for hashes @@ -611,19 +676,16 @@ func (p *parser) setType(key string, typ tomlType) { if len(keyContext) == 0 { keyContext = Key{""} } - p.types[keyContext.String()] = typ + p.keyInfo[keyContext.String()] = keyInfo{tomlType: typ, pos: pos} } // Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and // "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). -func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } -func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } -func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } -func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } -func (p *parser) addImplicitContext(key Key) { - p.addImplicit(key) - p.addContext(key, false) -} +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = struct{}{} } +func (p *parser) removeImplicit(key Key) { delete(p.implicits, key.String()) } +func (p *parser) isImplicit(key Key) bool { _, ok := p.implicits[key.String()]; return ok } +func (p *parser) isArray(key Key) bool { return p.keyInfo[key.String()].tomlType == tomlArray } +func (p *parser) addImplicitContext(key Key) { p.addImplicit(key); p.addContext(key, false) } // current returns the full key name of the current context. func (p *parser) current() string { @@ -646,112 +708,131 @@ func stripFirstNewline(s string) string { return s } -// Remove newlines inside triple-quoted strings if a line ends with "\". -func stripEscapedNewlines(s string) string { - split := strings.Split(s, "\n") - if len(split) < 1 { - return s - } - - escNL := false // Keep track of the last non-blank line was escaped. - for i, line := range split { - line = strings.TrimRight(line, " \t\r") - - if len(line) == 0 || line[len(line)-1] != '\\' { - split[i] = strings.TrimRight(split[i], "\r") - if !escNL && i != len(split)-1 { - split[i] += "\n" - } - continue +// stripEscapedNewlines removes whitespace after line-ending backslashes in +// multiline strings. +// +// A line-ending backslash is an unescaped \ followed only by whitespace until +// the next newline. After a line-ending backslash, all whitespace is removed +// until the next non-whitespace character. +func (p *parser) stripEscapedNewlines(s string) string { + var ( + b strings.Builder + i int + ) + b.Grow(len(s)) + for { + ix := strings.Index(s[i:], `\`) + if ix < 0 { + b.WriteString(s) + return b.String() } + i += ix - escBS := true - for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { - escBS = !escBS + if len(s) > i+1 && s[i+1] == '\\' { + // Escaped backslash. + i += 2 + continue } - if escNL { - line = strings.TrimLeft(line, " \t\r") + // Scan until the next non-whitespace. + j := i + 1 + whitespaceLoop: + for ; j < len(s); j++ { + switch s[j] { + case ' ', '\t', '\r', '\n': + default: + break whitespaceLoop + } } - escNL = !escBS - - if escBS { - split[i] += "\n" + if j == i+1 { + // Not a whitespace escape. + i++ continue } - - split[i] = line[:len(line)-1] // Remove \ - if len(split)-1 > i { - split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + if !strings.Contains(s[i:j], "\n") { + // This is not a line-ending backslash. (It's a bad escape sequence, + // but we can let replaceEscapes catch it.) 
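The rewritten `stripEscapedNewlines` above (its body continues below) implements the multiline-string rule that a line-ending backslash swallows all whitespace up to the next non-whitespace character. A short sketch of the behaviour, again reusing the earlier imports:

```go
// The backslash at each line end removes the newline and the indentation
// that follows it, so the three words join into a single line.
doc := `s = """
one \
     two \
     three"""`
var m map[string]string
if _, err := toml.Decode(doc, &m); err != nil {
	fmt.Println("decode failed:", err)
}
fmt.Println(m["s"]) // one two three
```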
+ i++ + continue } + b.WriteString(s[:i]) + s = s[j:] + i = 0 } - return strings.Join(split, "") } func (p *parser) replaceEscapes(it item, str string) string { - replaced := make([]rune, 0, len(str)) - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) + var ( + b strings.Builder + skip = 0 + ) + b.Grow(len(str)) + for i, c := range str { + if skip > 0 { + skip-- continue } - r += 1 - if r >= len(s) { + if c != '\\' { + b.WriteRune(c) + continue + } + + if i >= len(str) { p.bug("Escape sequence at end of string.") return "" } - switch s[r] { + switch str[i+1] { default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" + p.bug("Expected valid escape code after \\, but got %q.", str[i+1]) case ' ', '\t': - p.panicItemf(it, "invalid escape: '\\%c'", s[r]) - return "" + p.panicItemf(it, "invalid escape: '\\%c'", str[i+1]) case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 + b.WriteByte(0x08) + skip = 1 case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 + b.WriteByte(0x09) + skip = 1 case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 + b.WriteByte(0x0a) + skip = 1 case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 + b.WriteByte(0x0c) + skip = 1 case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 + b.WriteByte(0x0d) + skip = 1 + case 'e': + if p.tomlNext { + b.WriteByte(0x1b) + skip = 1 + } case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 + b.WriteByte(0x22) + skip = 1 case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 + b.WriteByte(0x5c) + skip = 1 + // The lexer guarantees the correct number of characters are present; + // don't need to check here. + case 'x': + if p.tomlNext { + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+4]) + b.WriteRune(escaped) + skip = 3 + } case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+5]) - replaced = append(replaced, escaped) - r += 5 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+6]) + b.WriteRune(escaped) + skip = 5 case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(it, s[r+1:r+9]) - replaced = append(replaced, escaped) - r += 9 + escaped := p.asciiEscapeToUnicode(it, str[i+2:i+10]) + b.WriteRune(escaped) + skip = 9 } } - return string(replaced) + return b.String() } -func (p *parser) asciiEscapeToUnicode(it item, bs []byte) rune { - s := string(bs) +func (p *parser) asciiEscapeToUnicode(it item, s string) rune { hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) if err != nil { p.bug("Could not parse '%s' as a hexadecimal number, but the lexer claims it's OK: %s", s, err) diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go index 254ca82e5494..10c51f7eeb41 100644 --- a/vendor/github.com/BurntSushi/toml/type_fields.go +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -25,10 +25,8 @@ type field struct { // breaking ties with index sequence. 
type byName []field -func (x byName) Len() int { return len(x) } - +func (x byName) Len() int { return len(x) } func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byName) Less(i, j int) bool { if x[i].name != x[j].name { return x[i].name < x[j].name @@ -45,10 +43,8 @@ func (x byName) Less(i, j int) bool { // byIndex sorts field by index sequence. type byIndex []field -func (x byIndex) Len() int { return len(x) } - +func (x byIndex) Len() int { return len(x) } func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - func (x byIndex) Less(i, j int) bool { for k, xik := range x[i].index { if k >= len(x[j].index) { diff --git a/vendor/github.com/BurntSushi/toml/type_toml.go b/vendor/github.com/BurntSushi/toml/type_toml.go index 4e90d77373b9..1c090d331e3f 100644 --- a/vendor/github.com/BurntSushi/toml/type_toml.go +++ b/vendor/github.com/BurntSushi/toml/type_toml.go @@ -22,13 +22,8 @@ func typeIsTable(t tomlType) bool { type tomlBaseType string -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} +func (btype tomlBaseType) typeString() string { return string(btype) } +func (btype tomlBaseType) String() string { return btype.typeString() } var ( tomlInteger tomlBaseType = "Integer" @@ -54,7 +49,7 @@ func (p *parser) typeOfPrimitive(lexItem item) tomlType { return tomlFloat case itemDatetime: return tomlDatetime - case itemString: + case itemString, itemStringEsc: return tomlString case itemMultilineString: return tomlString diff --git a/vendor/github.com/alessio/shellescape/.golangci.yml b/vendor/github.com/alessio/shellescape/.golangci.yml index cd4a17e442cb..836dabbba4f0 100644 --- a/vendor/github.com/alessio/shellescape/.golangci.yml +++ b/vendor/github.com/alessio/shellescape/.golangci.yml @@ -6,25 +6,20 @@ linters: disable-all: true enable: - bodyclose - - deadcode - - depguard - dogsled - goconst - gocritic - gofmt - goimports - - golint - gosec - gosimple - govet - ineffassign - - interfacer - - maligned - misspell - prealloc - - scopelint + - exportloopref + - revive - staticcheck - - structcheck - stylecheck - typecheck - unconvert diff --git a/vendor/github.com/alessio/shellescape/.goreleaser.yml b/vendor/github.com/alessio/shellescape/.goreleaser.yml index 064c9374d790..0915eb869b4b 100644 --- a/vendor/github.com/alessio/shellescape/.goreleaser.yml +++ b/vendor/github.com/alessio/shellescape/.goreleaser.yml @@ -9,18 +9,39 @@ before: builds: - env: - CGO_ENABLED=0 + - >- + {{- if eq .Os "darwin" }} + {{- if eq .Arch "amd64"}}CC=o64-clang{{- end }} + {{- if eq .Arch "arm64"}}CC=aarch64-apple-darwin20.2-clang{{- end }} + {{- end }} + {{- if eq .Os "windows" }} + {{- if eq .Arch "amd64" }}CC=x86_64-w64-mingw32-gcc{{- end }} + {{- end }} main: ./cmd/escargs goos: - linux - windows - darwin -archives: - - replacements: - darwin: Darwin - linux: Linux - windows: Windows - 386: i386 - amd64: x86_64 + - freebsd + goarch: + - amd64 + - arm64 + - arm + goarm: + - 6 + - 7 + goamd64: + - v2 + - v3 + ignore: + - goos: darwin + goarch: 386 + - goos: linux + goarch: arm + goarm: 7 + - goarm: mips64 + - gomips: hardfloat + - goamd64: v4 checksum: name_template: 'checksums.txt' snapshot: diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 5edd5a7ca9a3..9e790390b627 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,5 +1,17 @@ # 
Change history of go-restful + +## [v3.12.0] - 2024-03-11 +- add Flush method #529 (#538) +- fix: Improper handling of empty POST requests (#543) + +## [v3.11.3] - 2024-01-09 +- better not have 2 tags on one commit + +## [v3.11.1, v3.11.2] - 2024-01-09 + +- fix by restoring custom JSON handler functions (Mike Beaumont #540) + ## [v3.11.0] - 2023-08-19 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled. diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md index e3e30080ec18..7234604e47b8 100644 --- a/vendor/github.com/emicklei/go-restful/v3/README.md +++ b/vendor/github.com/emicklei/go-restful/v3/README.md @@ -2,7 +2,6 @@ go-restful ========== package for building REST-style Web Services using Google Go -[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful) [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful) [![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful) [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful) @@ -95,8 +94,7 @@ There are several hooks to customize the behavior of the go-restful package. - Trace logging - Compression - Encoders for other serializers -- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .` -- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` +- Use the package variable `TrimRightSlashEnabled` (default true) to control the behavior of matching routes that end with a slash `/` ## Resources diff --git a/vendor/github.com/emicklei/go-restful/v3/compress.go b/vendor/github.com/emicklei/go-restful/v3/compress.go index 1ff239f99fe7..80adf55fdfee 100644 --- a/vendor/github.com/emicklei/go-restful/v3/compress.go +++ b/vendor/github.com/emicklei/go-restful/v3/compress.go @@ -49,6 +49,16 @@ func (c *CompressingResponseWriter) CloseNotify() <-chan bool { return c.writer.(http.CloseNotifier).CloseNotify() } +// Flush is part of http.Flusher interface. Noop if the underlying writer doesn't support it. +func (c *CompressingResponseWriter) Flush() { + flusher, ok := c.writer.(http.Flusher) + if !ok { + // writer doesn't support http.Flusher interface + return + } + flusher.Flush() +} + // Close the underlying compressor func (c *CompressingResponseWriter) Close() error { if c.isCompressorClosed() { diff --git a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go index 66dfc824f55b..9808752acdf9 100644 --- a/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/v3/entity_accessors.go @@ -5,11 +5,18 @@ package restful // that can be found in the LICENSE file. import ( + "encoding/json" "encoding/xml" "strings" "sync" ) +var ( + MarshalIndent = json.MarshalIndent + NewDecoder = json.NewDecoder + NewEncoder = json.NewEncoder +) + // EntityReaderWriter can read and write values using an encoding such as JSON,XML. type EntityReaderWriter interface { // Read a serialized version of the value from the request. 
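One go-restful change above is worth illustrating: `CompressingResponseWriter` now forwards `Flush` when the wrapped writer implements `http.Flusher`, and is a no-op otherwise, which matters for streaming handlers behind `EnableContentEncoding`. A sketch with hypothetical route and handler names; the container and route APIs are go-restful's own:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	restful "github.com/emicklei/go-restful/v3"
)

// stream is a hypothetical handler; with content encoding enabled,
// resp.ResponseWriter may be the CompressingResponseWriter, whose new
// Flush forwards to the underlying http.Flusher so each tick reaches
// the client immediately.
func stream(req *restful.Request, resp *restful.Response) {
	for i := 0; i < 3; i++ {
		fmt.Fprintf(resp, "tick %d\n", i)
		if f, ok := resp.ResponseWriter.(http.Flusher); ok {
			f.Flush()
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/stream").To(stream))
	restful.DefaultContainer.Add(ws)
	restful.DefaultContainer.EnableContentEncoding(true)
	http.ListenAndServe(":8080", restful.DefaultContainer)
}
```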
diff --git a/vendor/github.com/emicklei/go-restful/v3/json.go b/vendor/github.com/emicklei/go-restful/v3/json.go deleted file mode 100644 index 871165166a16..000000000000 --- a/vendor/github.com/emicklei/go-restful/v3/json.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !jsoniter - -package restful - -import "encoding/json" - -var ( - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go b/vendor/github.com/emicklei/go-restful/v3/jsoniter.go deleted file mode 100644 index 11b8f8ae7f17..000000000000 --- a/vendor/github.com/emicklei/go-restful/v3/jsoniter.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build jsoniter - -package restful - -import "github.com/json-iterator/go" - -var ( - json = jsoniter.ConfigCompatibleWithStandardLibrary - MarshalIndent = json.MarshalIndent - NewDecoder = json.NewDecoder - NewEncoder = json.NewEncoder -) diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go index 07a0c91e9424..a9b3faaa81fa 100644 --- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go +++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go @@ -155,7 +155,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length") if (method == http.MethodPost || method == http.MethodPut || - method == http.MethodPatch) && length == "" { + method == http.MethodPatch) && (length == "" || length == "0") { return nil, NewError( http.StatusUnsupportedMediaType, fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")), diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/v3/.editorconfig similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.editorconfig rename to vendor/github.com/go-task/slim-sprig/v3/.editorconfig diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/v3/.gitattributes similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitattributes rename to vendor/github.com/go-task/slim-sprig/v3/.gitattributes diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/v3/.gitignore similarity index 100% rename from vendor/github.com/go-task/slim-sprig/.gitignore rename to vendor/github.com/go-task/slim-sprig/v3/.gitignore diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md similarity index 95% rename from vendor/github.com/go-task/slim-sprig/CHANGELOG.md rename to vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md index 61d8ebffc375..2ce45dd4eca6 100644 --- a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md +++ b/vendor/github.com/go-task/slim-sprig/v3/CHANGELOG.md @@ -1,5 +1,24 @@ # Changelog +## Release 3.2.3 (2022-11-29) + +### Changed + +- Updated docs (thanks @book987 @aJetHorn @neelayu @pellizzetti @apricote @SaigyoujiYuyuko233 @AlekSi) +- #348: Updated huandu/xstrings which fixed a snake case bug (thanks @yxxhero) +- #353: Updated masterminds/semver which included bug fixes +- #354: Updated golang.org/x/crypto which included bug fixes + +## Release 3.2.2 (2021-02-04) + +This is a re-release of 3.2.1 to satisfy something with the Go module system. + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. 
see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + ## Release 3.2.0 (2020-12-14) ### Added diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt similarity index 100% rename from vendor/github.com/go-task/slim-sprig/LICENSE.txt rename to vendor/github.com/go-task/slim-sprig/v3/LICENSE.txt diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/v3/README.md similarity index 88% rename from vendor/github.com/go-task/slim-sprig/README.md rename to vendor/github.com/go-task/slim-sprig/v3/README.md index 72579471ff0e..b5ab564254f4 100644 --- a/vendor/github.com/go-task/slim-sprig/README.md +++ b/vendor/github.com/go-task/slim-sprig/v3/README.md @@ -1,4 +1,4 @@ -# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) +# Slim-Sprig: Template functions for Go templates [![Go Reference](https://pkg.go.dev/badge/github.com/go-task/slim-sprig/v3.svg)](https://pkg.go.dev/github.com/go-task/slim-sprig/v3) Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with all functions that depend on external (non standard library) or crypto packages diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml similarity index 89% rename from vendor/github.com/go-task/slim-sprig/Taskfile.yml rename to vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml index cdcfd223b719..8e6346bb19eb 100644 --- a/vendor/github.com/go-task/slim-sprig/Taskfile.yml +++ b/vendor/github.com/go-task/slim-sprig/v3/Taskfile.yml @@ -1,6 +1,6 @@ # https://taskfile.dev -version: '2' +version: '3' tasks: default: diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/v3/crypto.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/crypto.go rename to vendor/github.com/go-task/slim-sprig/v3/crypto.go diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/v3/date.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/date.go rename to vendor/github.com/go-task/slim-sprig/v3/date.go diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/v3/defaults.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/defaults.go rename to vendor/github.com/go-task/slim-sprig/v3/defaults.go diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/v3/dict.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/dict.go rename to vendor/github.com/go-task/slim-sprig/v3/dict.go diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/v3/doc.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/doc.go rename to vendor/github.com/go-task/slim-sprig/v3/doc.go diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/v3/functions.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/functions.go rename to vendor/github.com/go-task/slim-sprig/v3/functions.go diff --git 
a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/v3/list.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/list.go rename to vendor/github.com/go-task/slim-sprig/v3/list.go diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/v3/network.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/network.go rename to vendor/github.com/go-task/slim-sprig/v3/network.go diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/v3/numeric.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/numeric.go rename to vendor/github.com/go-task/slim-sprig/v3/numeric.go diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/v3/reflect.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/reflect.go rename to vendor/github.com/go-task/slim-sprig/v3/reflect.go diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/v3/regex.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/regex.go rename to vendor/github.com/go-task/slim-sprig/v3/regex.go diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/v3/strings.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/strings.go rename to vendor/github.com/go-task/slim-sprig/v3/strings.go diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/v3/url.go similarity index 100% rename from vendor/github.com/go-task/slim-sprig/url.go rename to vendor/github.com/go-task/slim-sprig/v3/url.go diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index 182c926b9089..860bb304c349 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c6c2..4580bab18396 100644 --- a/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. 
- if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4b66282cb8e0..eee0132e7406 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 60ef7e92687f..62df80a55636 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -145,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ -436,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -627,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" 
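`Profile.Aggregate` above gains a `columnnumber` parameter, so every caller now passes six booleans, as the legacy Java profile hunks show (the location-formatting hunk continues below). A sketch of a caller that keeps line numbers but drops the new column data; the helper name and file path are hypothetical:

```go
package main

import (
	"log"
	"os"

	"github.com/google/pprof/profile"
)

// loadAggregated shows the new six-argument Aggregate: inlineFrame,
// function, filename, linenumber, columnnumber, address. Passing false
// for columnnumber zeroes the new Line.Column field.
func loadAggregated(path string) (*profile.Profile, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	p, err := profile.Parse(f)
	if err != nil {
		return nil, err
	}
	if err := p.Aggregate(true, true, true, true, false, false); err != nil {
		return nil, err
	}
	return p, nil
}

func main() {
	p, err := loadAggregated("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d samples", len(p.Sample))
}
```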
if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 44222220a383..0a894979998f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,23 @@ +## 2.17.2 + +### Fixes +- fix: close files [32259c8] +- fix github output log level for skipped specs [780e7a3] + +### Maintenance +- Bump github.com/google/pprof [d91fe4e] +- Bump github.com/go-task/slim-sprig to v3 [8cb662e] +- Bump golang.org/x/net in /integration/_fixtures/version_mismatch_fixture (#1391) [3134422] +- Bump github-pages from 230 to 231 in /docs (#1384) [eca81b4] +- Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#1383) [760def8] +- Bump golang.org/x/net from 0.23.0 to 0.24.0 (#1381) [4ce33f4] +- Fix test for gomega version bump [f2fcd97] +- Bump github.com/onsi/gomega from 1.30.0 to 1.33.0 (#1390) [fd622d2] +- Bump golang.org/x/tools from 0.17.0 to 0.19.0 (#1368) [5474a26] +- Bump github-pages from 229 to 230 in /docs (#1359) [e6d1170] +- Bump google.golang.org/protobuf from 1.28.0 to 1.33.0 (#1374) [7f447b2] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#1380) [f15239a] + ## 2.17.1 ### Fixes diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go index 73aff0b7a185..b2dc59be66fd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -7,7 +7,7 @@ import ( "os" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index be01dec979dc..cf3b7cb6d6d5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -10,7 +10,7 @@ import ( "strings" "text/template" - sprig "github.com/go-task/slim-sprig" + sprig "github.com/go-task/slim-sprig/v3" "github.com/onsi/ginkgo/v2/ginkgo/command" "github.com/onsi/ginkgo/v2/ginkgo/internal" "github.com/onsi/ginkgo/v2/types" @@ -174,6 +174,7 @@ func moduleName(modRoot string) string { if err != nil { return "" } + defer modFile.Close() mod := make([]byte, 128) _, err = modFile.Read(mod) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go index 5f35864ddba7..8e16d2bb034b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -161,6 +161,7 @@ func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { if err != nil { return err } + defer dst.Close() err = DumpCoverProfiles(merged, dst) if err != nil { return err @@ -196,6 +197,7 @@ func MergeProfiles(profilePaths []string, destination string) error { return fmt.Errorf("Could not open profile: 
%s\n%s", profilePath, err.Error()) } prof, err := profile.Parse(proFile) + _ = proFile.Close() if err != nil { return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 4026859ec397..980973370e06 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -419,7 +419,11 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur highlightColor := r.highlightColorForState(state) r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) if r.conf.GithubOutput { - r.emitBlock(r.fi(indent, "::error file=%s,line=%d::%s %s", failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + level := "error" + if state.Is(types.SpecStateSkipped) { + level = "notice" + } + r.emitBlock(r.fi(indent, "::%s file=%s,line=%d::%s %s", level, failure.Location.FileName, failure.Location.LineNumber, failure.FailureNodeType, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } else { r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 43244a9bd519..2a3215b5138f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -324,6 +324,7 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) continue } err = xml.NewDecoder(f).Decode(&report) + _ = f.Close() if err != nil { messages = append(messages, fmt.Sprintf("Could not decode %s:\n%s", source, err.Error())) continue diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 851d42b456b8..5dd0140cd34c 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.17.1" +const VERSION = "2.17.2" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 01ec5245cdc6..62af14ad2f2a 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,22 @@ +## 1.33.1 + +### Fixes +- fix confusing eventually docs [3a66379] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.17.1 to 2.17.2 [e9bc35a] + +## 1.33.0 + +### Features + +`Receive` not accepts `Receive(, MATCHER>)`, allowing you to pick out a specific value on the channel that satisfies the provided matcher and is stored in the provided pointer. 
+ +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.15.0 to 2.17.1 (#745) [9999deb] +- Bump github-pages from 229 to 230 in /docs (#735) [cb5ff21] +- Bump golang.org/x/net from 0.20.0 to 0.23.0 (#746) [bac6596] + ## 1.32.0 ### Maintenance diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index ffb81b1feb39..9697d5134ff4 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.32.0" +const GOMEGA_VERSION = "1.33.1" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -372,11 +372,11 @@ You can ensure that you get a number of consecutive successful tries before succ Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: - Eventually(..., "1s", "2s", ctx).Should(...) + Eventually(..., "10s", "2s", ctx).Should(...) is equivalent to - Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) + Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 8860d677fc8f..7ef27dc9c955 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -194,20 +194,21 @@ func BeClosed() types.GomegaMatcher { // // will repeatedly attempt to pull values out of `c` until a value matching "bar" is received. 
// -// Finally, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: +// Furthermore, if you want to have a reference to the value *sent* to the channel you can pass the `Receive` matcher a pointer to a variable of the appropriate type: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing)) // Expect(myThing.Sprocket).Should(Equal("foo")) // Expect(myThing.IsValid()).Should(BeTrue()) +// +// Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, +// you can pass a pointer to a variable of the appropriate type first, and second a matcher: +// +// var myThing thing +// Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) func Receive(args ...interface{}) types.GomegaMatcher { - var arg interface{} - if len(args) > 0 { - arg = args[0] - } - return &matchers.ReceiveMatcher{ - Arg: arg, + Args: args, } } diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 1936a2ba52f2..948164eaf88b 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -3,6 +3,7 @@ package matchers import ( + "errors" "fmt" "reflect" @@ -10,7 +11,7 @@ import ( ) type ReceiveMatcher struct { - Arg interface{} + Args []interface{} receivedValue reflect.Value channelClosed bool } @@ -29,15 +30,38 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - - if matcher.Arg != nil { - subMatcher, hasSubMatcher = (matcher.Arg).(omegaMatcher) + var resultReference interface{} + + // Valid arg formats are as follows, always with optional POINTER before + // optional MATCHER: + // - Receive() + // - Receive(POINTER) + // - Receive(MATCHER) + // - Receive(POINTER, MATCHER) + args := matcher.Args + if len(args) > 0 { + arg := args[0] + _, isSubMatcher := arg.(omegaMatcher) + if !isSubMatcher && reflect.ValueOf(arg).Kind() == reflect.Ptr { + // Consume optional POINTER arg first, if it ain't no matcher ;) + resultReference = arg + args = args[1:] + } + } + if len(args) > 0 { + arg := args[0] + subMatcher, hasSubMatcher = arg.(omegaMatcher) if !hasSubMatcher { - argType := reflect.TypeOf(matcher.Arg) - if argType.Kind() != reflect.Ptr { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(matcher.Arg, 1)) - } + // At this point we assume the dev user wanted to assign a received + // value, so [POINTER,]MATCHER. + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nTo:\n%s\nYou need to pass a pointer!", format.Object(actual, 1), format.Object(arg, 1)) } + // Consume optional MATCHER arg. + args = args[1:] + } + if len(args) > 0 { + // If there are still args present, reject all.
+ return false, errors.New("Receive matcher expects at most an optional pointer and/or an optional matcher") } winnerIndex, value, open := reflect.Select([]reflect.SelectCase{ @@ -58,16 +82,20 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } if hasSubMatcher { - if didReceive { - matcher.receivedValue = value - return subMatcher.Match(matcher.receivedValue.Interface()) + if !didReceive { + return false, nil } - return false, nil + matcher.receivedValue = value + if match, err := subMatcher.Match(matcher.receivedValue.Interface()); err != nil || !match { + return match, err + } + // if we received a match, then fall through in order to handle an + // optional assignment of the received value to the specified reference. } if didReceive { - if matcher.Arg != nil { - outValue := reflect.ValueOf(matcher.Arg) + if resultReference != nil { + outValue := reflect.ValueOf(resultReference) if value.Type().AssignableTo(outValue.Elem().Type()) { outValue.Elem().Set(value) @@ -77,7 +105,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro outValue.Elem().Set(value.Elem()) return true, nil } else { - return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(matcher.Arg, 1)) + return false, fmt.Errorf("Cannot assign a value from the channel:\n%s\nType:\n%s\nTo:\n%s", format.Object(actual, 1), format.Object(value.Interface(), 1), format.Object(resultReference, 1)) } } @@ -88,7 +116,11 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { @@ -105,7 +137,11 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - subMatcher, hasSubMatcher := (matcher.Arg).(omegaMatcher) + var matcherArg interface{} + if len(matcher.Args) > 0 { + matcherArg = matcher.Args[len(matcher.Args)-1] + } + subMatcher, hasSubMatcher := (matcherArg).(omegaMatcher) closedAddendum := "" if matcher.channelClosed { diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index 6c061712bb10..7399e04bf654 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -25,9 +25,9 @@ and [much faster][v2-bench]. If you only need reading and writing TOML documents (majority of cases), those features are implemented and the API unlikely to change. -The remaining features (Document structure editing and tooling) will be added -shortly. While pull-requests are welcome on v1, no active development is -expected on it. When v2.0.0 is released, v1 will be deprecated. +The remaining features will be added shortly. While pull-requests are welcome on +v1, no active development is expected on it. When v2.0.0 is released, v1 will be +deprecated. 
šŸ‘‰ [go-toml v2][v2] diff --git a/vendor/github.com/pelletier/go-toml/SECURITY.md b/vendor/github.com/pelletier/go-toml/SECURITY.md new file mode 100644 index 000000000000..b2f21cfc92c9 --- /dev/null +++ b/vendor/github.com/pelletier/go-toml/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ---------- | ------------------ | +| Latest 2.x | :white_check_mark: | +| All 1.x | :x: | +| All 0.x | :x: | + +## Reporting a Vulnerability + +Email a vulnerability report to `security@pelletier.codes`. Make sure to include +as many details as possible to reproduce the vulnerability. This is a +side-project: I will try to get back to you as quickly as possible, time +permitting in my personal life. Providing a working patch helps very much! diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index 3443c35452ad..571273049848 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -1113,7 +1113,7 @@ func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}, mval1 *ref return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) } - if val.Convert(reflect.TypeOf(int(1))).Int() < 0 { + if val.Type().Kind() != reflect.Uint64 && val.Convert(reflect.TypeOf(int(1))).Int() < 0 { return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) } if reflect.Indirect(reflect.New(mtype)).OverflowUint(val.Convert(reflect.TypeOf(uint64(0))).Uint()) { diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index f5e1a44fb4d5..b3726d0dd8cc 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -293,42 +293,41 @@ func (p *tomlParser) parseRvalue() interface{} { return math.NaN() case tokenInteger: cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 + base := 10 + s := cleanedVal + checkInvalidUnderscore := numberContainsInvalidUnderscore if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { switch cleanedVal[1] { case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) + checkInvalidUnderscore = hexNumberContainsInvalidUnderscore + base = 16 case 'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) + base = 8 case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) + base = 2 default: panic("invalid base") // the lexer should catch this first } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) + s = cleanedVal[2:] } + + err := checkInvalidUnderscore(tok.val) if err != nil { p.raiseError(tok, "%s", err) } - return val + + var val interface{} + val, err = strconv.ParseInt(s, base, 64) + if err == nil { + return val + } + + if s[0] != '-' { + if val, err = strconv.ParseUint(s, base, 64); err == nil { + return val + } + } + p.raiseError(tok, "%s", err) case tokenFloat: err := 
numberContainsInvalidUnderscore(tok.val) if err != nil { diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index 6d82587c4882..5541b941f8b8 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -471,7 +471,7 @@ func LoadBytes(b []byte) (tree *Tree, err error) { if _, ok := r.(runtime.Error); ok { panic(r) } - err = errors.New(r.(string)) + err = fmt.Errorf("%s", r) } }() diff --git a/vendor/golang.org/x/mod/modfile/read.go b/vendor/golang.org/x/mod/modfile/read.go index 5b5bb5e115b3..220568259158 100644 --- a/vendor/golang.org/x/mod/modfile/read.go +++ b/vendor/golang.org/x/mod/modfile/read.go @@ -225,7 +225,7 @@ func (x *FileSyntax) Cleanup() { if ww == 0 { continue } - if ww == 1 { + if ww == 1 && len(stmt.RParen.Comments.Before) == 0 { // Collapse block into single line. line := &Line{ Comments: Comments{ diff --git a/vendor/golang.org/x/mod/modfile/rule.go b/vendor/golang.org/x/mod/modfile/rule.go index 35fd1f534cf8..0e7b7e26792b 100644 --- a/vendor/golang.org/x/mod/modfile/rule.go +++ b/vendor/golang.org/x/mod/modfile/rule.go @@ -308,6 +308,7 @@ var laxGoVersionRE = lazyregexp.New(`^v?(([1-9][0-9]*)\.(0|[1-9][0-9]*))([^0-9]. // Toolchains must be named beginning with `go1`, // like "go1.20.3" or "go1.20.3-gccgo". As a special case, "default" is also permitted. +// TODO(samthanawalla): Replace regex with https://pkg.go.dev/go/version#IsValid in 1.23+ var ToolchainRE = lazyregexp.New(`^default$|^go1($|\.)`) func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, args []string, fix VersionFixer, strict bool) { @@ -384,7 +385,7 @@ func (f *File) add(errs *ErrorList, block *LineBlock, line *Line, verb string, a errorf("toolchain directive expects exactly one argument") return } else if strict && !ToolchainRE.MatchString(args[0]) { - errorf("invalid toolchain version '%s': must match format go1.23.0 or local", args[0]) + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) return } f.Toolchain = &Toolchain{Syntax: line} @@ -630,7 +631,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, errorf("go directive expects exactly one argument") return } else if !GoVersionRE.MatchString(args[0]) { - errorf("invalid go version '%s': must match format 1.23", args[0]) + errorf("invalid go version '%s': must match format 1.23.0", args[0]) return } @@ -646,7 +647,7 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, errorf("toolchain directive expects exactly one argument") return } else if !ToolchainRE.MatchString(args[0]) { - errorf("invalid toolchain version '%s': must match format go1.23 or local", args[0]) + errorf("invalid toolchain version '%s': must match format go1.23.0 or default", args[0]) return } @@ -974,6 +975,8 @@ func (f *File) AddGoStmt(version string) error { var hint Expr if f.Module != nil && f.Module.Syntax != nil { hint = f.Module.Syntax + } else if f.Syntax == nil { + f.Syntax = new(FileSyntax) } f.Go = &Go{ Version: version, diff --git a/vendor/golang.org/x/sys/unix/mmap_nomremap.go b/vendor/golang.org/x/sys/unix/mmap_nomremap.go index 4b68e59780a2..7f602ffd26d4 100644 --- a/vendor/golang.org/x/sys/unix/mmap_nomremap.go +++ b/vendor/golang.org/x/sys/unix/mmap_nomremap.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris +//go:build aix || darwin || dragonfly || freebsd || openbsd || solaris || zos package unix diff --git a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index b473038c6155..27c41b6f0a13 100644 --- a/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1520,6 +1520,14 @@ func (m *mmapper) Munmap(data []byte) (err error) { return nil } +func Mmap(fd int, offset int64, length int, prot int, flags int) (data []byte, err error) { + return mapper.Mmap(fd, offset, length, prot, flags) +} + +func Munmap(b []byte) (err error) { + return mapper.Munmap(b) +} + func Read(fd int, p []byte) (n int, err error) { n, err = read(fd, p) if raceenabled { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 6395a031d45d..6525c62f3c2f 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -165,6 +165,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) [failretval==InvalidHandle] = CreateFileW //sys CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) [failretval==InvalidHandle] = CreateNamedPipeW //sys ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) +//sys DisconnectNamedPipe(pipe Handle) (err error) //sys GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) //sys GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) = SetNamedPipeHandleState @@ -348,8 +349,19 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost //sys GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) //sys SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) +//sys ClearCommBreak(handle Handle) (err error) +//sys ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) +//sys EscapeCommFunction(handle Handle, dwFunc uint32) (err error) +//sys GetCommState(handle Handle, lpDCB *DCB) (err error) +//sys GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) //sys GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys PurgeComm(handle Handle, dwFlags uint32) (err error) +//sys SetCommBreak(handle Handle) (err error) +//sys SetCommMask(handle Handle, dwEvtMask uint32) (err error) +//sys SetCommState(handle Handle, lpDCB *DCB) (err error) //sys SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) +//sys SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) +//sys WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err 
error) //sys GetActiveProcessorCount(groupNumber uint16) (ret uint32) //sys GetMaximumProcessorCount(groupNumber uint16) (ret uint32) //sys EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) = user32.EnumWindows @@ -1834,3 +1846,73 @@ func ResizePseudoConsole(pconsole Handle, size Coord) error { // accept arguments that can be casted to uintptr, and Coord can't. return resizePseudoConsole(pconsole, *((*uint32)(unsafe.Pointer(&size)))) } + +// DCB constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-dcb. +const ( + CBR_110 = 110 + CBR_300 = 300 + CBR_600 = 600 + CBR_1200 = 1200 + CBR_2400 = 2400 + CBR_4800 = 4800 + CBR_9600 = 9600 + CBR_14400 = 14400 + CBR_19200 = 19200 + CBR_38400 = 38400 + CBR_57600 = 57600 + CBR_115200 = 115200 + CBR_128000 = 128000 + CBR_256000 = 256000 + + DTR_CONTROL_DISABLE = 0x00000000 + DTR_CONTROL_ENABLE = 0x00000010 + DTR_CONTROL_HANDSHAKE = 0x00000020 + + RTS_CONTROL_DISABLE = 0x00000000 + RTS_CONTROL_ENABLE = 0x00001000 + RTS_CONTROL_HANDSHAKE = 0x00002000 + RTS_CONTROL_TOGGLE = 0x00003000 + + NOPARITY = 0 + ODDPARITY = 1 + EVENPARITY = 2 + MARKPARITY = 3 + SPACEPARITY = 4 + + ONESTOPBIT = 0 + ONE5STOPBITS = 1 + TWOSTOPBITS = 2 +) + +// EscapeCommFunction constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-escapecommfunction. +const ( + SETXOFF = 1 + SETXON = 2 + SETRTS = 3 + CLRRTS = 4 + SETDTR = 5 + CLRDTR = 6 + SETBREAK = 8 + CLRBREAK = 9 +) + +// PurgeComm constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-purgecomm. +const ( + PURGE_TXABORT = 0x0001 + PURGE_RXABORT = 0x0002 + PURGE_TXCLEAR = 0x0004 + PURGE_RXCLEAR = 0x0008 +) + +// SetCommMask constants. See https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-setcommmask. 
+const ( + EV_RXCHAR = 0x0001 + EV_RXFLAG = 0x0002 + EV_TXEMPTY = 0x0004 + EV_CTS = 0x0008 + EV_DSR = 0x0010 + EV_RLSD = 0x0020 + EV_BREAK = 0x0040 + EV_ERR = 0x0080 + EV_RING = 0x0100 +) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 359780f6ace5..d8cb71db0a61 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -3380,3 +3380,27 @@ type BLOB struct { Size uint32 BlobData *byte } + +type ComStat struct { + Flags uint32 + CBInQue uint32 + CBOutQue uint32 +} + +type DCB struct { + DCBlength uint32 + BaudRate uint32 + Flags uint32 + wReserved uint16 + XonLim uint16 + XoffLim uint16 + ByteSize uint8 + Parity uint8 + StopBits uint8 + XonChar byte + XoffChar byte + ErrorChar byte + EofChar byte + EvtChar byte + wReserved1 uint16 +} diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index e8791c82c30f..5c6035ddfa92 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -188,6 +188,8 @@ var ( procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") procCancelIo = modkernel32.NewProc("CancelIo") procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procClearCommBreak = modkernel32.NewProc("ClearCommBreak") + procClearCommError = modkernel32.NewProc("ClearCommError") procCloseHandle = modkernel32.NewProc("CloseHandle") procClosePseudoConsole = modkernel32.NewProc("ClosePseudoConsole") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") @@ -212,7 +214,9 @@ var ( procDeleteProcThreadAttributeList = modkernel32.NewProc("DeleteProcThreadAttributeList") procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procDisconnectNamedPipe = modkernel32.NewProc("DisconnectNamedPipe") procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procEscapeCommFunction = modkernel32.NewProc("EscapeCommFunction") procExitProcess = modkernel32.NewProc("ExitProcess") procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW") procFindClose = modkernel32.NewProc("FindClose") @@ -236,6 +240,8 @@ var ( procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") procGetACP = modkernel32.NewProc("GetACP") procGetActiveProcessorCount = modkernel32.NewProc("GetActiveProcessorCount") + procGetCommModemStatus = modkernel32.NewProc("GetCommModemStatus") + procGetCommState = modkernel32.NewProc("GetCommState") procGetCommTimeouts = modkernel32.NewProc("GetCommTimeouts") procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") @@ -322,6 +328,7 @@ var ( procProcess32NextW = modkernel32.NewProc("Process32NextW") procProcessIdToSessionId = modkernel32.NewProc("ProcessIdToSessionId") procPulseEvent = modkernel32.NewProc("PulseEvent") + procPurgeComm = modkernel32.NewProc("PurgeComm") procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") procQueryFullProcessImageNameW = modkernel32.NewProc("QueryFullProcessImageNameW") procQueryInformationJobObject = modkernel32.NewProc("QueryInformationJobObject") @@ -335,6 +342,9 @@ var ( procResetEvent = modkernel32.NewProc("ResetEvent") procResizePseudoConsole = modkernel32.NewProc("ResizePseudoConsole") procResumeThread = modkernel32.NewProc("ResumeThread") + procSetCommBreak = 
modkernel32.NewProc("SetCommBreak") + procSetCommMask = modkernel32.NewProc("SetCommMask") + procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") @@ -342,7 +352,6 @@ var ( procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") procSetEvent = modkernel32.NewProc("SetEvent") @@ -351,6 +360,7 @@ var ( procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle") procSetFilePointer = modkernel32.NewProc("SetFilePointer") procSetFileTime = modkernel32.NewProc("SetFileTime") + procSetFileValidData = modkernel32.NewProc("SetFileValidData") procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") procSetNamedPipeHandleState = modkernel32.NewProc("SetNamedPipeHandleState") @@ -361,6 +371,7 @@ var ( procSetStdHandle = modkernel32.NewProc("SetStdHandle") procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procSetupComm = modkernel32.NewProc("SetupComm") procSizeofResource = modkernel32.NewProc("SizeofResource") procSleepEx = modkernel32.NewProc("SleepEx") procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") @@ -379,6 +390,7 @@ var ( procVirtualQueryEx = modkernel32.NewProc("VirtualQueryEx") procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") procWTSGetActiveConsoleSessionId = modkernel32.NewProc("WTSGetActiveConsoleSessionId") + procWaitCommEvent = modkernel32.NewProc("WaitCommEvent") procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") @@ -1641,6 +1653,22 @@ func CancelIoEx(s Handle, o *Overlapped) (err error) { return } +func ClearCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) { + r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func CloseHandle(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -1845,6 +1873,14 @@ func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBuff return } +func DisconnectNamedPipe(pipe Handle) (err error) { + r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) { var _p0 uint32 if bInheritHandle { @@ -1857,6 +1893,14 @@ func 
DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetP return } +func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) { + r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func ExitProcess(exitcode uint32) { syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0) return @@ -2058,6 +2102,22 @@ func GetActiveProcessorCount(groupNumber uint16) (ret uint32) { return } +func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func GetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2810,6 +2870,14 @@ func PulseEvent(event Handle) (err error) { return } +func PurgeComm(handle Handle, dwFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) { r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max)) n = uint32(r0) @@ -2924,6 +2992,30 @@ func ResumeThread(thread Handle) (ret uint32, err error) { return } +func SetCommBreak(handle Handle) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommMask(handle Handle, dwEvtMask uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetCommState(handle Handle, lpDCB *DCB) (err error) { + r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0) if r1 == 0 { @@ -2989,14 +3081,6 @@ func SetEndOfFile(handle Handle) (err error) { return } -func SetFileValidData(handle Handle, validDataLength int64) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) - if r1 == 0 { - err = errnoErr(e1) - } - return -} - func SetEnvironmentVariable(name *uint16, value *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0) if r1 == 0 { @@ -3060,6 +3144,14 @@ func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetim return } +func SetFileValidData(handle Handle, validDataLength int64) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func 
SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags)) if r1 == 0 { @@ -3145,6 +3237,14 @@ func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err erro return } +func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue)) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) { r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0) size = uint32(r0) @@ -3291,6 +3391,14 @@ func WTSGetActiveConsoleSessionId() (sessionID uint32) { return } +func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func waitForMultipleObjects(count uint32, handles uintptr, waitAll bool, waitMilliseconds uint32) (event uint32, err error) { var _p0 uint32 if waitAll { diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go index 03543bd4bb8f..137cc8df1d86 100644 --- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go +++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go @@ -47,7 +47,7 @@ import ( func Find(importPath, srcDir string) (filename, path string) { cmd := exec.Command("go", "list", "-json", "-export", "--", importPath) cmd.Dir = srcDir - out, err := cmd.CombinedOutput() + out, err := cmd.Output() if err != nil { return "", "" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index f33b0afc22cf..865d90597a94 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -9,6 +9,7 @@ package packages import ( "context" "encoding/json" + "errors" "fmt" "go/ast" "go/parser" @@ -24,6 +25,8 @@ import ( "sync" "time" + "golang.org/x/sync/errgroup" + "golang.org/x/tools/go/gcexportdata" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/packagesinternal" @@ -255,8 +258,27 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) { // defaultDriver will fall back to the go list driver. // The boolean result indicates that an external driver handled the request. func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) { + const ( + // windowsArgMax specifies the maximum command line length for + // the Windows' CreateProcess function. + windowsArgMax = 32767 + // maxEnvSize is a very rough estimation of the maximum environment + // size of a user. + maxEnvSize = 16384 + // safeArgMax specifies the maximum safe command line length to use + // by the underlying driver excl. the environment. We choose the Windows' + // ARG_MAX as the starting point because it's one of the lowest ARG_MAX + // constants out of the different supported platforms, + // e.g., https://www.in-ulm.de/~mascheck/various/argmax/#results. 
+ safeArgMax = windowsArgMax - maxEnvSize + ) + chunks, err := splitIntoChunks(patterns, safeArgMax) + if err != nil { + return nil, false, err + } + if driver := findExternalDriver(cfg); driver != nil { - response, err := driver(cfg, patterns...) + response, err := callDriverOnChunks(driver, cfg, chunks) if err != nil { return nil, false, err } else if !response.NotHandled { @@ -265,11 +287,82 @@ func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, erro // (fall through) } - response, err := goListDriver(cfg, patterns...) + response, err := callDriverOnChunks(goListDriver, cfg, chunks) if err != nil { return nil, false, err } - return response, false, nil + return response, false, err +} + +// splitIntoChunks chunks the slice so that the total number of characters +// in a chunk is no longer than argMax. +func splitIntoChunks(patterns []string, argMax int) ([][]string, error) { + if argMax <= 0 { + return nil, errors.New("failed to split patterns into chunks, negative safe argMax value") + } + var chunks [][]string + charsInChunk := 0 + nextChunkStart := 0 + for i, v := range patterns { + vChars := len(v) + if vChars > argMax { + // a single pattern is longer than the maximum safe ARG_MAX, hardly should happen + return nil, errors.New("failed to split patterns into chunks, a pattern is too long") + } + charsInChunk += vChars + 1 // +1 is for a whitespace between patterns that has to be counted too + if charsInChunk > argMax { + chunks = append(chunks, patterns[nextChunkStart:i]) + nextChunkStart = i + charsInChunk = vChars + } + } + // add the last chunk + if nextChunkStart < len(patterns) { + chunks = append(chunks, patterns[nextChunkStart:]) + } + return chunks, nil +} + +func callDriverOnChunks(driver driver, cfg *Config, chunks [][]string) (*DriverResponse, error) { + if len(chunks) == 0 { + return driver(cfg) + } + responses := make([]*DriverResponse, len(chunks)) + errNotHandled := errors.New("driver returned NotHandled") + var g errgroup.Group + for i, chunk := range chunks { + i := i + chunk := chunk + g.Go(func() (err error) { + responses[i], err = driver(cfg, chunk...) + if responses[i] != nil && responses[i].NotHandled { + err = errNotHandled + } + return err + }) + } + if err := g.Wait(); err != nil { + if errors.Is(err, errNotHandled) { + return &DriverResponse{NotHandled: true}, nil + } + return nil, err + } + return mergeResponses(responses...), nil +} + +func mergeResponses(responses ...*DriverResponse) *DriverResponse { + if len(responses) == 0 { + return nil + } + response := newDeduper() + response.dr.NotHandled = false + response.dr.Compiler = responses[0].Compiler + response.dr.Arch = responses[0].Arch + response.dr.GoVersion = responses[0].GoVersion + for _, v := range responses { + response.addAll(v) + } + return response.dr } // A Package describes a loaded Go package. 
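As a standalone illustration of the chunking strategy implemented above, the following sketch mirrors splitIntoChunks outside x/tools (the patterns and the 18-character budget are arbitrary, and the oversized-pattern error path is omitted for brevity):

// Hedged sketch mirroring the pattern-chunking helper above.
package main

import "fmt"

// splitIntoChunks groups patterns so that each chunk, joined by single
// spaces, stays within argMax characters.
func splitIntoChunks(patterns []string, argMax int) [][]string {
	var chunks [][]string
	charsInChunk := 0
	nextChunkStart := 0
	for i, v := range patterns {
		charsInChunk += len(v) + 1 // +1 for the separating space
		if charsInChunk > argMax {
			chunks = append(chunks, patterns[nextChunkStart:i])
			nextChunkStart = i
			charsInChunk = len(v)
		}
	}
	if nextChunkStart < len(patterns) {
		chunks = append(chunks, patterns[nextChunkStart:])
	}
	return chunks
}

func main() {
	patterns := []string{"./a/...", "./b/...", "./c/...", "./d/..."}
	// Prints [[./a/... ./b/...] [./c/... ./d/...]]: two chunks, each
	// joining to 15 characters, within the 18-character budget.
	fmt.Println(splitIntoChunks(patterns, 18))
}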
@@ -1025,7 +1118,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { Sizes: ld.sizes, // may be nil } if lpkg.Module != nil && lpkg.Module.GoVersion != "" { - typesinternal.SetGoVersion(tc, "go"+lpkg.Module.GoVersion) + tc.GoVersion = "go" + lpkg.Module.GoVersion } if (ld.Mode & typecheckCgo) != 0 { if !typesinternal.SetUsesCgo(tc) { @@ -1036,10 +1129,24 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { return } } - types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) + typErr := types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax) lpkg.importErrors = nil // no longer needed + // In go/types go1.21 and go1.22, Checker.Files failed fast with + // a "too new" error, without calling tc.Error and without + // proceeding to type-check the package (#66525). + // We rely on the runtimeVersion error to give the suggested remedy. + if typErr != nil && len(lpkg.Errors) == 0 && len(lpkg.Syntax) > 0 { + if msg := typErr.Error(); strings.HasPrefix(msg, "package requires newer Go version") { + appendError(types.Error{ + Fset: ld.Fset, + Pos: lpkg.Syntax[0].Package, + Msg: msg, + }) + } + } + // If !Cgo, the type-checker uses FakeImportC mode, so // it doesn't invoke the importer for import "C", // nor report an error for the import, @@ -1061,6 +1168,12 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) { } } + // If types.Checker.Files had an error that was unreported, + // make sure to report the unknown error so the package is illTyped. + if typErr != nil && len(lpkg.Errors) == 0 { + appendError(typErr) + } + // Record accumulated errors. illTyped := len(lpkg.Errors) > 0 if !illTyped { diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 11d5c8c3adf1..a2386c347a25 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -29,9 +29,12 @@ import ( "strconv" "strings" - "golang.org/x/tools/internal/typeparams" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) +// TODO(adonovan): think about generic aliases. + // A Path is an opaque name that identifies a types.Object // relative to its package. Conceptually, the name consists of a // sequence of destructuring operations applied to the package scope @@ -223,7 +226,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := obj.Type().(*types.TypeParam); !ok { + if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -310,7 +313,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := o.Type().(*types.Named); ok { + if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -391,17 +394,12 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // of objectpath will only be giving us origin methods, anyway, as referring // to instantiated methods is usually not useful.
- if typeparams.OriginMethod(meth) != meth { + if meth.Origin() != meth { return "", false } - recvT := meth.Type().(*types.Signature).Recv().Type() - if ptr, ok := recvT.(*types.Pointer); ok { - recvT = ptr.Elem() - } - - named, ok := recvT.(*types.Named) - if !ok { + _, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv()) + if named == nil { return "", false } @@ -444,6 +442,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { + case *aliases.Alias: + return find(obj, aliases.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. @@ -616,6 +616,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil + t = aliases.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go new file mode 100644 index 000000000000..f89112c8ee57 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -0,0 +1,28 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliases + +import ( + "go/token" + "go/types" +) + +// Package aliases defines backward compatible shims +// for the types.Alias type representation added in 1.22. +// This defines placeholders for x/tools until 1.26. + +// NewAlias creates a new TypeName in Package pkg that +// is an alias for the type rhs. +// +// When GoVersion>=1.22 and GODEBUG=gotypesalias=1, +// the Type() of the return value is a *types.Alias. +func NewAlias(pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { + if enabled() { + tname := types.NewTypeName(pos, pkg, name, nil) + newAlias(tname, rhs) + return tname + } + return types.NewTypeName(pos, pkg, name, rhs) +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go new file mode 100644 index 000000000000..1872b56ff8fc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.22 +// +build !go1.22 + +package aliases + +import ( + "go/types" +) + +// Alias is a placeholder for a go/types.Alias for <=1.21. +// It will never be created by go/types. +type Alias struct{} + +func (*Alias) String() string { panic("unreachable") } + +func (*Alias) Underlying() types.Type { panic("unreachable") } + +func (*Alias) Obj() *types.TypeName { panic("unreachable") } + +// Unalias returns the type t for go <=1.21. +func Unalias(t types.Type) types.Type { return t } + +// Always false for go <=1.21. Ignores GODEBUG. 
+func enabled() bool { return false } + +func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go new file mode 100644 index 000000000000..8b92116284d0 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -0,0 +1,72 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.22 +// +build go1.22 + +package aliases + +import ( + "go/ast" + "go/parser" + "go/token" + "go/types" + "os" + "strings" + "sync" +) + +// Alias is an alias of types.Alias. +type Alias = types.Alias + +// Unalias is a wrapper of types.Unalias. +func Unalias(t types.Type) types.Type { return types.Unalias(t) } + +// newAlias is an internal alias around types.NewAlias. +// Direct usage is discouraged at the moment. +// Try to use NewAlias instead. +func newAlias(tname *types.TypeName, rhs types.Type) *Alias { + a := types.NewAlias(tname, rhs) + // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. + Unalias(a) + return a +} + +// enabled returns true when types.Aliases are enabled. +func enabled() bool { + // Use the gotypesalias value in GODEBUG if set. + godebug := os.Getenv("GODEBUG") + value := -1 // last set value. + for _, f := range strings.Split(godebug, ",") { + switch f { + case "gotypesalias=1": + value = 1 + case "gotypesalias=0": + value = 0 + } + } + switch value { + case 0: + return false + case 1: + return true + default: + return aliasesDefault() + } +} + +// aliasesDefault reports if aliases are enabled by default. +func aliasesDefault() bool { + // Dynamically check if Aliases will be produced from go/types.
+ aliasesDefaultOnce.Do(func() { + fset := token.NewFileSet() + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) + _, gotypesaliasDefault = pkg.Scope().Lookup("A").Type().(*types.Alias) + }) + return gotypesaliasDefault +} + +var gotypesaliasDefault bool +var aliasesDefaultOnce sync.Once diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 2d078ccb19c5..39df91124a46 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -259,13 +259,6 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func return } -func deref(typ types.Type) types.Type { - if p, _ := typ.(*types.Pointer); p != nil { - return p.Elem() - } - return typ -} - type byPath []*types.Package func (a byPath) Len() int { return len(a) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 2ee8c70164f8..683bd7395a6b 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -21,8 +21,10 @@ import ( "sort" "strconv" "strings" + "unsafe" "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/tokeninternal" ) @@ -463,7 +465,7 @@ func (p *iexporter) doDecl(obj types.Object) { switch obj := obj.(type) { case *types.Var: - w.tag('V') + w.tag(varTag) w.pos(obj.Pos()) w.typ(obj.Type(), obj.Pkg()) @@ -481,9 +483,9 @@ func (p *iexporter) doDecl(obj types.Object) { // Function. if sig.TypeParams().Len() == 0 { - w.tag('F') + w.tag(funcTag) } else { - w.tag('G') + w.tag(genericFuncTag) } w.pos(obj.Pos()) // The tparam list of the function type is the declaration of the type @@ -499,20 +501,20 @@ func (p *iexporter) doDecl(obj types.Object) { w.signature(sig) case *types.Const: - w.tag('C') + w.tag(constTag) w.pos(obj.Pos()) w.value(obj.Type(), obj.Val()) case *types.TypeName: t := obj.Type() - if tparam, ok := t.(*types.TypeParam); ok { - w.tag('P') + if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := constraint.(*types.Interface); iface != nil { + if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -522,8 +524,13 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag('A') + w.tag(aliasTag) w.pos(obj.Pos()) + if alias, ok := t.(*aliases.Alias); ok { + // Preserve materialized aliases, + // even of non-exported types. 
+ t = aliasRHS(alias) + } w.typ(t, obj.Pkg()) break } @@ -535,9 +542,9 @@ func (p *iexporter) doDecl(obj types.Object) { } if named.TypeParams().Len() == 0 { - w.tag('T') + w.tag(typeTag) } else { - w.tag('U') + w.tag(genericTypeTag) } w.pos(obj.Pos()) @@ -547,7 +554,7 @@ func (p *iexporter) doDecl(obj types.Object) { w.tparamList(obj.Name(), named.TypeParams(), obj.Pkg()) } - underlying := obj.Type().Underlying() + underlying := named.Underlying() w.typ(underlying, obj.Pkg()) if types.IsInterface(t) { @@ -738,6 +745,11 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { + case *aliases.Alias: + // TODO(adonovan): support parameterized aliases, following *types.Named. + w.startType(aliasType) + w.qualifiedType(t.Obj()) + case *types.Named: if targs := t.TypeArgs(); targs.Len() > 0 { w.startType(instanceType) @@ -843,7 +855,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := ft.(*types.Named); named != nil { + if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) @@ -1319,3 +1331,19 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) } func internalErrorf(format string, args ...interface{}) error { return internalError(fmt.Sprintf(format, args...)) } + +// aliasRHS removes exactly one Alias constructor. +func aliasRHS(alias *aliases.Alias) types.Type { + // TODO(adonovan): if proposal #66559 is accepted, this will + // become Alias.RHS(alias). In the meantime, we must punch + // through the drywall. + type go123Alias struct { + _ *types.TypeName + _ *types.TypeParamList + RHS types.Type + _ types.Type + } + var raw *go123Alias + *(**aliases.Alias)(unsafe.Pointer(&raw)) = alias + return raw.RHS +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 9fffa9ad05cb..2732121b5efa 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -22,6 +22,8 @@ import ( "strings" "golang.org/x/tools/go/types/objectpath" + "golang.org/x/tools/internal/aliases" + "golang.org/x/tools/internal/typesinternal" ) type intReader struct { @@ -78,6 +80,20 @@ const ( typeParamType instanceType unionType + aliasType +) + +// Object tags +const ( + varTag = 'V' + funcTag = 'F' + genericFuncTag = 'G' + constTag = 'C' + aliasTag = 'A' + genericAliasTag = 'B' + typeParamTag = 'P' + typeTag = 'T' + genericTypeTag = 'U' ) // IImportData imports a package from the serialized package data @@ -322,7 +338,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte } // SetConstraint can't be called if the constraint type is not yet complete. - // When type params are created in the 'P' case of (*importReader).obj(), + // When type params are created in the typeParamTag case of (*importReader).obj(), // the associated constraint type may not be complete due to recursion. // Therefore, we defer calling SetConstraint there, and call it here instead // after all types are complete. 
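The gotypesalias probing introduced in the aliases package earlier in this diff can be reproduced standalone; the sketch below prints the dynamic type instead of asserting on *types.Alias, so it also compiles on Go versions before 1.22:

// Hedged sketch of the aliasesDefault-style runtime probe above.
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	// Type-check a tiny package declaring an alias, then inspect how
	// go/types models it: *types.Alias on Go 1.22+ with gotypesalias
	// enabled, otherwise the resolved *types.Basic (int).
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "a.go", "package p; type A = int", 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("A is represented as %T\n", pkg.Scope().Lookup("A").Type())
}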
@@ -522,7 +538,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := rhs.(*types.Interface) + iface, _ := aliases.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -544,25 +560,29 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case 'A': + case aliasTag: typ := r.typ() - - r.declare(types.NewTypeName(pos, r.currPkg, name, typ)) - - case 'C': + // TODO(adonovan): support generic aliases: + // if tag == genericAliasTag { + // tparams := r.tparamList() + // alias.SetTypeParams(tparams) + // } + r.declare(aliases.NewAlias(pos, r.currPkg, name, typ)) + + case constTag: typ, val := r.value() r.declare(types.NewConst(pos, r.currPkg, name, typ, val)) - case 'F', 'G': + case funcTag, genericFuncTag: var tparams []*types.TypeParam - if tag == 'G' { + if tag == genericFuncTag { tparams = r.tparamList() } sig := r.signature(nil, nil, tparams) r.declare(types.NewFunc(pos, r.currPkg, name, sig)) - case 'T', 'U': + case typeTag, genericTypeTag: // Types can be recursive. We need to setup a stub // declaration before recursing. obj := types.NewTypeName(pos, r.currPkg, name, nil) @@ -570,7 +590,7 @@ func (r *importReader) obj(name string) { // Declare obj before calling r.tparamList, so the new type name is recognized // if used in the constraint of one of its own typeparams (see #48280). r.declare(obj) - if tag == 'U' { + if tag == genericTypeTag { tparams := r.tparamList() named.SetTypeParams(tparams) } @@ -587,14 +607,13 @@ func (r *importReader) obj(name string) { // If the receiver has any targs, set those as the // rparams of the method (since those are the // typeparams being used in the method sig/body). - base := baseType(recv.Type()) - assert(base != nil) - targs := base.TypeArgs() + _, recvNamed := typesinternal.ReceiverNamed(recv) + targs := recvNamed.TypeArgs() var rparams []*types.TypeParam if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = targs.At(i).(*types.TypeParam) + rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -603,7 +622,7 @@ func (r *importReader) obj(name string) { } } - case 'P': + case typeParamTag: // We need to "declare" a typeparam in order to have a name that // can be referenced recursively (if needed) in the type param's // bound. @@ -624,7 +643,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := constraint.(*types.Interface) + iface, _ := aliases.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -636,7 +655,7 @@ func (r *importReader) obj(name string) { // completely set up all types in ImportData. 
r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint}) - case 'V': + case varTag: typ := r.typ() r.declare(types.NewVar(pos, r.currPkg, name, typ)) @@ -831,7 +850,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := t.(*types.Interface) + _, ok := aliases.Unalias(t).(*types.Interface) return ok } @@ -853,7 +872,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { errorf("unexpected kind tag in %q: %v", r.p.ipath, k) return nil - case definedType: + case aliasType, definedType: pkg, name := r.qualifiedIdent() r.p.doDecl(pkg, name) return pkg.Scope().Lookup(name).(*types.TypeName).Type() @@ -1030,7 +1049,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = r.typ().(*types.TypeParam) + xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) } return xs } @@ -1077,13 +1096,3 @@ func (r *importReader) byte() byte { } return x } - -func baseType(typ types.Type) *types.Named { - // pointer receivers are never types.Named types - if p, _ := typ.(*types.Pointer); p != nil { - typ = p.Elem() - } - // receiver base types are always (possibly generic) types.Named types - n, _ := typ.(*types.Named) - return n -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go deleted file mode 100644 index d892273efb61..000000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go117.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGo1_11 - -func additionalPredeclared() []types.Type { - return nil -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go index edbe6ea7041d..0cd3b91b65ad 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.18 -// +build go1.18 - package gcimporter import "go/types" diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go index 286bf445483d..38b624cadab6 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !(go1.18 && goexperiment.unified) -// +build !go1.18 !goexperiment.unified +//go:build !goexperiment.unified +// +build !goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go index b5d69ffbe682..b5118d0b3a50 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build go1.18 && goexperiment.unified -// +build go1.18,goexperiment.unified +//go:build goexperiment.unified +// +build goexperiment.unified package gcimporter diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go deleted file mode 100644 index 8eb20729c2ad..000000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_no.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.18 -// +build !go1.18 - -package gcimporter - -import ( - "fmt" - "go/token" - "go/types" -) - -func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) { - err = fmt.Errorf("go/tools compiled with a Go version earlier than 1.18 cannot read unified IR export data") - return -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index b977435f626d..b3be452ae8a4 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -4,9 +4,6 @@ // Derived from go/internal/gcimporter/ureader.go -//go:build go1.18 -// +build go1.18 - package gcimporter import ( @@ -16,6 +13,7 @@ import ( "sort" "strings" + "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" ) @@ -526,7 +524,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() typ := r.typ() - declare(types.NewTypeName(pos, objPkg, objName, typ)) + declare(aliases.NewAlias(pos, objPkg, objName, typ)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +551,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). - if iface, ok := underlying.(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 55312522dc2d..f7de3c8283b2 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -158,12 +158,15 @@ type Invocation struct { BuildFlags []string // If ModFlag is set, the go command is invoked with -mod=ModFlag. + // TODO(rfindley): remove, in favor of Args. ModFlag string // If ModFile is set, the go command is invoked with -modfile=ModFile. + // TODO(rfindley): remove, in favor of Args. ModFile string // If Overlay is set, the go command is invoked with -overlay=Overlay. + // TODO(rfindley): remove, in favor of Args. 
Overlay string // If CleanEnv is set, the invocation will run only with the environment diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 6a18f63a44dc..55980327616e 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -31,6 +31,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) // importToGroup is a list of functions which map from an import path to @@ -511,9 +512,9 @@ func (p *pass) assumeSiblingImportsValid() { } for left, rights := range refs { if imp, ok := importsByName[left]; ok { - if m, ok := stdlib[imp.ImportPath]; ok { + if m, ok := stdlib.PackageSymbols[imp.ImportPath]; ok { // We have the stdlib in memory; no need to guess. - rights = copyExports(m) + rights = symbolNameSet(m) } p.addCandidate(imp, &packageInfo{ // no name; we already know it. @@ -641,7 +642,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena dupCheck := map[string]struct{}{} // Start off with the standard library. - for importPath, exports := range stdlib { + for importPath, symbols := range stdlib.PackageSymbols { p := &pkg{ dir: filepath.Join(goenv["GOROOT"], "src", importPath), importPathShort: importPath, @@ -650,6 +651,13 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena } dupCheck[importPath] = struct{}{} if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) { + var exports []stdlib.Symbol + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Func, stdlib.Type, stdlib.Var, stdlib.Const: + exports = append(exports, sym) + } + } wrappedCallback.exportsLoaded(p, exports) } } @@ -670,7 +678,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena dupCheck[pkg.importPathShort] = struct{}{} return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) }, - exportsLoaded: func(pkg *pkg, exports []string) { + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { // If we're an x_test, load the package under test's test variant. if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { var err error @@ -795,7 +803,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, // A PackageExport is a package and its exports. type PackageExport struct { Fix *ImportFix - Exports []string + Exports []stdlib.Symbol } // GetPackageExports returns all known packages with name pkg and their exports. @@ -810,8 +818,8 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP packageNameLoaded: func(pkg *pkg) bool { return pkg.packageName == searchPkg }, - exportsLoaded: func(pkg *pkg, exports []string) { - sort.Strings(exports) + exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) { + sortSymbols(exports) wrapped(PackageExport{ Fix: &ImportFix{ StmtInfo: ImportInfo{ @@ -988,8 +996,10 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) { // already know the view type. 
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 { e.resolver = newGopathResolver(e) + } else if r, err := newModuleResolver(e, e.ModCache); err != nil { + e.resolverErr = err } else { - e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache) + e.resolver = Resolver(r) } } @@ -1054,7 +1064,7 @@ func addStdlibCandidates(pass *pass, refs references) error { if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir { return } - exports := copyExports(stdlib[pkg]) + exports := symbolNameSet(stdlib.PackageSymbols[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, &packageInfo{name: path.Base(pkg), exports: exports}) @@ -1066,7 +1076,7 @@ func addStdlibCandidates(pass *pass, refs references) error { add("math/rand") continue } - for importPath := range stdlib { + for importPath := range stdlib.PackageSymbols { if path.Base(importPath) == left { add(importPath) } @@ -1085,7 +1095,7 @@ type Resolver interface { // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. - loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) // scoreImportPath returns the relevance for an import path. scoreImportPath(ctx context.Context, path string) float64 @@ -1114,7 +1124,7 @@ type scanCallback struct { // If it returns true, the package's exports will be loaded. packageNameLoaded func(pkg *pkg) bool // exportsLoaded is called when a package's exports have been loaded. - exportsLoaded func(pkg *pkg, exports []string) + exportsLoaded func(pkg *pkg, exports []stdlib.Symbol) } func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error { @@ -1295,7 +1305,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) ( // importPathToName finds out the actual package name, as declared in its .go files. func importPathToName(bctx *build.Context, importPath, srcDir string) string { // Fast path for standard library without going to disk. - if _, ok := stdlib[importPath]; ok { + if stdlib.HasPackage(importPath) { return path.Base(importPath) // stdlib packages always match their paths. } @@ -1493,7 +1503,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error } func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 { - if _, ok := stdlib[path]; ok { + if stdlib.HasPackage(path) { return MaxRelevance } return MaxRelevance - 1 @@ -1510,7 +1520,7 @@ func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) [] return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } @@ -1530,7 +1540,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []stdlib.Symbol, error) { // Look for non-test, buildable .go files which could provide exports. 
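Note on the GetResolver rewrite just above: the old one-liner e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache) stored a typed-nil *ModuleResolver in the Resolver interface field whenever the constructor failed, so a later nil check on the interface would pass even though no usable resolver existed. The split error path, plausibly motivated by that classic typed-nil pitfall, assigns the interface only on success. A minimal self-contained sketch of the trap; the Resolver, moduleResolver, and newModuleResolver names here are simplified stand-ins, not the vendored types.

    package main

    import "fmt"

    type Resolver interface{ scan() }

    type moduleResolver struct{}

    func (*moduleResolver) scan() {}

    // newModuleResolver stands in for the real constructor; on error it
    // returns a nil *moduleResolver alongside the error.
    func newModuleResolver(fail bool) (*moduleResolver, error) {
    	if fail {
    		return nil, fmt.Errorf("init failed")
    	}
    	return &moduleResolver{}, nil
    }

    func main() {
    	// Old pattern: assign both results straight into the fields.
    	var r Resolver
    	mr, err := newModuleResolver(true)
    	r = mr
    	fmt.Println(err != nil, r == nil) // true false: interface holds a typed nil

    	// New pattern: assign the interface only on success.
    	var r2 Resolver
    	if mr2, err2 := newModuleResolver(true); err2 == nil {
    		r2 = mr2
    	}
    	fmt.Println(r2 == nil) // true
    }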
all, err := os.ReadDir(dir) if err != nil { @@ -1554,7 +1564,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } var pkgName string - var exports []string + var exports []stdlib.Symbol fset := token.NewFileSet() for _, fi := range files { select { @@ -1581,21 +1591,41 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl continue } pkgName = f.Name.Name - for name := range f.Scope.Objects { + for name, obj := range f.Scope.Objects { if ast.IsExported(name) { - exports = append(exports, name) + var kind stdlib.Kind + switch obj.Kind { + case ast.Con: + kind = stdlib.Const + case ast.Typ: + kind = stdlib.Type + case ast.Var: + kind = stdlib.Var + case ast.Fun: + kind = stdlib.Func + } + exports = append(exports, stdlib.Symbol{ + Name: name, + Kind: kind, + Version: 0, // unknown; be permissive + }) } } } + sortSymbols(exports) if env.Logf != nil { - sortedExports := append([]string(nil), exports...) - sort.Strings(sortedExports) - env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) + env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports) } return pkgName, exports, nil } +func sortSymbols(syms []stdlib.Symbol) { + sort.Slice(syms, func(i, j int) bool { + return syms[i].Name < syms[j].Name + }) +} + // findImport searches for a package with the given symbols. // If no package is found, findImport returns ("", false, nil) func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) { @@ -1662,7 +1692,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa exportsMap := make(map[string]bool, len(exports)) for _, sym := range exports { - exportsMap[sym] = true + exportsMap[sym.Name] = true } // If it doesn't have the right @@ -1820,10 +1850,13 @@ func (fn visitFn) Visit(node ast.Node) ast.Visitor { return fn(node) } -func copyExports(pkg []string) map[string]bool { - m := make(map[string]bool, len(pkg)) - for _, v := range pkg { - m[v] = true +func symbolNameSet(symbols []stdlib.Symbol) map[string]bool { + names := make(map[string]bool) + for _, sym := range symbols { + switch sym.Kind { + case stdlib.Const, stdlib.Var, stdlib.Type, stdlib.Func: + names[sym.Name] = true + } } - return m + return names } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index 660407548e5a..f83465520a45 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:generate go run mkstdlib.go - // Package imports implements a Go pretty-printer (like package "go/format") // that also adds or removes import statements as necessary. package imports @@ -109,7 +107,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e } // formatFile formats the file syntax tree. -// It may mutate the token.FileSet. +// It may mutate the token.FileSet and the ast.File. 
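Note on the loadExportsFromFiles hunk above: when exports are scanned from source files rather than taken from the static table, each file-scope ast.Object is now classified into a stdlib.Kind (ast.Con to Const, ast.Typ to Type, ast.Var to Var, ast.Fun to Func) before being recorded as a stdlib.Symbol. A runnable standard-library-only sketch of that scope walk; the demo source is made up, and map iteration order is random.

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/parser"
    	"go/token"
    )

    func main() {
    	src := `package demo

    const Answer = 42

    type Widget struct{}

    var Count int

    func New() *Widget { return nil }
    `
    	fset := token.NewFileSet()
    	f, err := parser.ParseFile(fset, "demo.go", src, 0)
    	if err != nil {
    		panic(err)
    	}
    	// Mirror of the fix.go change: walk the file scope and classify
    	// each exported object by its ast kind.
    	for name, obj := range f.Scope.Objects {
    		if ast.IsExported(name) {
    			fmt.Printf("%s -> %v\n", name, obj.Kind) // e.g. Answer -> const
    		}
    	}
    }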
// // If an adjust function is provided, it is called after formatting // with the original source (formatFile's src parameter) and the diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 3d0f38f6c231..21ef938978e1 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -21,6 +21,7 @@ import ( "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) // Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning @@ -313,15 +314,19 @@ func (r *ModuleResolver) ClearForNewScan() Resolver { // TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods. func (e *ProcessEnv) ClearModuleInfo() { if r, ok := e.resolver.(*ModuleResolver); ok { - resolver, resolverErr := newModuleResolver(e, e.ModCache) - if resolverErr == nil { - <-r.scanSema // acquire (guards caches) - resolver.moduleCacheCache = r.moduleCacheCache - resolver.otherCache = r.otherCache - r.scanSema <- struct{}{} // release + resolver, err := newModuleResolver(e, e.ModCache) + if err != nil { + e.resolver = nil + e.resolverErr = err + return } - e.resolver = resolver - e.resolverErr = resolverErr + + <-r.scanSema // acquire (guards caches) + resolver.moduleCacheCache = r.moduleCacheCache + resolver.otherCache = r.otherCache + r.scanSema <- struct{}{} // release + + e.UpdateResolver(resolver) } } @@ -412,7 +417,7 @@ func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, er return r.otherCache.CachePackageName(info) } -func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CacheExports(ctx, env, info) } @@ -632,7 +637,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error } func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 { - if _, ok := stdlib[path]; ok { + if stdlib.HasPackage(path) { return MaxRelevance } mod, _ := r.findPackage(path) @@ -710,7 +715,7 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) { if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index cfc54657656d..b1192696b28e 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -14,6 +14,7 @@ import ( "golang.org/x/mod/module" "golang.org/x/tools/internal/gopathwalk" + "golang.org/x/tools/internal/stdlib" ) // To find packages to import, the resolver needs to know about all of @@ -73,7 +74,7 @@ type directoryPackageInfo struct { // the default build context GOOS and GOARCH. // // We can make this explicit, and key exports by GOOS, GOARCH. 
- exports []string + exports []stdlib.Symbol } // reachedStatus returns true when info has a status at least target and any error associated with @@ -229,7 +230,7 @@ func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro return info.packageName, info.err } -func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { +func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) { if reached, _ := info.reachedStatus(exportsLoaded); reached { return info.packageName, info.exports, info.err } diff --git a/vendor/golang.org/x/tools/internal/imports/sortimports.go b/vendor/golang.org/x/tools/internal/imports/sortimports.go index 1a0a7ebd9e4d..da8194fd965b 100644 --- a/vendor/golang.org/x/tools/internal/imports/sortimports.go +++ b/vendor/golang.org/x/tools/internal/imports/sortimports.go @@ -18,7 +18,7 @@ import ( // sortImports sorts runs of consecutive import lines in import blocks in f. // It also removes duplicate imports when it is possible to do so without data loss. // -// It may mutate the token.File. +// It may mutate the token.File and the ast.File. func sortImports(localPrefix string, tokFile *token.File, f *ast.File) { for i, d := range f.Decls { d, ok := d.(*ast.GenDecl) diff --git a/vendor/golang.org/x/tools/internal/imports/zstdlib.go b/vendor/golang.org/x/tools/internal/imports/zstdlib.go deleted file mode 100644 index 8db24df2ff46..000000000000 --- a/vendor/golang.org/x/tools/internal/imports/zstdlib.go +++ /dev/null @@ -1,11406 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Code generated by mkstdlib.go. DO NOT EDIT. 
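Note on the zstdlib.go deletion below: the generated map[string][]string of exported names per package is retired in favor of golang.org/x/tools/internal/stdlib.PackageSymbols, which the fix.go hunks above consume. The new data models each export as a Symbol carrying a Kind and the Go version that introduced it (hence the Version field set in the fix.go hunk), which is why fix.go filters for Func, Type, Var, and Const before building name sets. A self-contained sketch of that filtering, mirroring the symbolNameSet helper added in fix.go; the Symbol and Kind declarations here are simplified stand-ins for the internal stdlib package.

    package main

    import "fmt"

    // Simplified stand-ins for golang.org/x/tools/internal/stdlib.
    // The real Symbol also carries the Go version that introduced it.
    type Kind int

    const (
    	Invalid Kind = iota
    	Type
    	Const
    	Var
    	Func
    	Field  // struct field: not separately importable
    	Method // method: not separately importable
    )

    type Symbol struct {
    	Name string
    	Kind Kind
    }

    // symbolNameSet mirrors the helper added in fix.go: only package-level
    // consts, vars, types, and funcs count as importable exports.
    func symbolNameSet(symbols []Symbol) map[string]bool {
    	names := make(map[string]bool)
    	for _, sym := range symbols {
    		switch sym.Kind {
    		case Const, Var, Type, Func:
    			names[sym.Name] = true
    		}
    	}
    	return names
    }

    func main() {
    	syms := []Symbol{
    		{Name: "Buffer", Kind: Type},
    		{Name: "NewBuffer", Kind: Func},
    		{Name: "Buffer.Write", Kind: Method}, // filtered out
    	}
    	fmt.Println(symbolNameSet(syms)) // map[Buffer:true NewBuffer:true]
    }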
- -package imports - -var stdlib = map[string][]string{ -	"archive/tar": { -		"ErrFieldTooLong", -		"ErrHeader", ... [remainder of this generated table elided: the hunk header records 11,406 deleted lines in total, all machine-generated lists of exported standard-library symbols in the same form, with no hand-written content; the data now lives in golang.org/x/tools/internal/stdlib] ... -}
- "R_RISCV_JAL", - "R_RISCV_JUMP_SLOT", - "R_RISCV_LO12_I", - "R_RISCV_LO12_S", - "R_RISCV_NONE", - "R_RISCV_PCREL_HI20", - "R_RISCV_PCREL_LO12_I", - "R_RISCV_PCREL_LO12_S", - "R_RISCV_RELATIVE", - "R_RISCV_RELAX", - "R_RISCV_RVC_BRANCH", - "R_RISCV_RVC_JUMP", - "R_RISCV_RVC_LUI", - "R_RISCV_SET16", - "R_RISCV_SET32", - "R_RISCV_SET6", - "R_RISCV_SET8", - "R_RISCV_SUB16", - "R_RISCV_SUB32", - "R_RISCV_SUB6", - "R_RISCV_SUB64", - "R_RISCV_SUB8", - "R_RISCV_TLS_DTPMOD32", - "R_RISCV_TLS_DTPMOD64", - "R_RISCV_TLS_DTPREL32", - "R_RISCV_TLS_DTPREL64", - "R_RISCV_TLS_GD_HI20", - "R_RISCV_TLS_GOT_HI20", - "R_RISCV_TLS_TPREL32", - "R_RISCV_TLS_TPREL64", - "R_RISCV_TPREL_ADD", - "R_RISCV_TPREL_HI20", - "R_RISCV_TPREL_I", - "R_RISCV_TPREL_LO12_I", - "R_RISCV_TPREL_LO12_S", - "R_RISCV_TPREL_S", - "R_SPARC", - "R_SPARC_10", - "R_SPARC_11", - "R_SPARC_13", - "R_SPARC_16", - "R_SPARC_22", - "R_SPARC_32", - "R_SPARC_5", - "R_SPARC_6", - "R_SPARC_64", - "R_SPARC_7", - "R_SPARC_8", - "R_SPARC_COPY", - "R_SPARC_DISP16", - "R_SPARC_DISP32", - "R_SPARC_DISP64", - "R_SPARC_DISP8", - "R_SPARC_GLOB_DAT", - "R_SPARC_GLOB_JMP", - "R_SPARC_GOT10", - "R_SPARC_GOT13", - "R_SPARC_GOT22", - "R_SPARC_H44", - "R_SPARC_HH22", - "R_SPARC_HI22", - "R_SPARC_HIPLT22", - "R_SPARC_HIX22", - "R_SPARC_HM10", - "R_SPARC_JMP_SLOT", - "R_SPARC_L44", - "R_SPARC_LM22", - "R_SPARC_LO10", - "R_SPARC_LOPLT10", - "R_SPARC_LOX10", - "R_SPARC_M44", - "R_SPARC_NONE", - "R_SPARC_OLO10", - "R_SPARC_PC10", - "R_SPARC_PC22", - "R_SPARC_PCPLT10", - "R_SPARC_PCPLT22", - "R_SPARC_PCPLT32", - "R_SPARC_PC_HH22", - "R_SPARC_PC_HM10", - "R_SPARC_PC_LM22", - "R_SPARC_PLT32", - "R_SPARC_PLT64", - "R_SPARC_REGISTER", - "R_SPARC_RELATIVE", - "R_SPARC_UA16", - "R_SPARC_UA32", - "R_SPARC_UA64", - "R_SPARC_WDISP16", - "R_SPARC_WDISP19", - "R_SPARC_WDISP22", - "R_SPARC_WDISP30", - "R_SPARC_WPLT30", - "R_SYM32", - "R_SYM64", - "R_TYPE32", - "R_TYPE64", - "R_X86_64", - "R_X86_64_16", - "R_X86_64_32", - "R_X86_64_32S", - "R_X86_64_64", - "R_X86_64_8", - "R_X86_64_COPY", - "R_X86_64_DTPMOD64", - "R_X86_64_DTPOFF32", - "R_X86_64_DTPOFF64", - "R_X86_64_GLOB_DAT", - "R_X86_64_GOT32", - "R_X86_64_GOT64", - "R_X86_64_GOTOFF64", - "R_X86_64_GOTPC32", - "R_X86_64_GOTPC32_TLSDESC", - "R_X86_64_GOTPC64", - "R_X86_64_GOTPCREL", - "R_X86_64_GOTPCREL64", - "R_X86_64_GOTPCRELX", - "R_X86_64_GOTPLT64", - "R_X86_64_GOTTPOFF", - "R_X86_64_IRELATIVE", - "R_X86_64_JMP_SLOT", - "R_X86_64_NONE", - "R_X86_64_PC16", - "R_X86_64_PC32", - "R_X86_64_PC32_BND", - "R_X86_64_PC64", - "R_X86_64_PC8", - "R_X86_64_PLT32", - "R_X86_64_PLT32_BND", - "R_X86_64_PLTOFF64", - "R_X86_64_RELATIVE", - "R_X86_64_RELATIVE64", - "R_X86_64_REX_GOTPCRELX", - "R_X86_64_SIZE32", - "R_X86_64_SIZE64", - "R_X86_64_TLSDESC", - "R_X86_64_TLSDESC_CALL", - "R_X86_64_TLSGD", - "R_X86_64_TLSLD", - "R_X86_64_TPOFF32", - "R_X86_64_TPOFF64", - "Rel32", - "Rel64", - "Rela32", - "Rela64", - "SHF_ALLOC", - "SHF_COMPRESSED", - "SHF_EXECINSTR", - "SHF_GROUP", - "SHF_INFO_LINK", - "SHF_LINK_ORDER", - "SHF_MASKOS", - "SHF_MASKPROC", - "SHF_MERGE", - "SHF_OS_NONCONFORMING", - "SHF_STRINGS", - "SHF_TLS", - "SHF_WRITE", - "SHN_ABS", - "SHN_COMMON", - "SHN_HIOS", - "SHN_HIPROC", - "SHN_HIRESERVE", - "SHN_LOOS", - "SHN_LOPROC", - "SHN_LORESERVE", - "SHN_UNDEF", - "SHN_XINDEX", - "SHT_DYNAMIC", - "SHT_DYNSYM", - "SHT_FINI_ARRAY", - "SHT_GNU_ATTRIBUTES", - "SHT_GNU_HASH", - "SHT_GNU_LIBLIST", - "SHT_GNU_VERDEF", - "SHT_GNU_VERNEED", - "SHT_GNU_VERSYM", - "SHT_GROUP", - "SHT_HASH", - "SHT_HIOS", - "SHT_HIPROC", - "SHT_HIUSER", - 
"SHT_INIT_ARRAY", - "SHT_LOOS", - "SHT_LOPROC", - "SHT_LOUSER", - "SHT_MIPS_ABIFLAGS", - "SHT_NOBITS", - "SHT_NOTE", - "SHT_NULL", - "SHT_PREINIT_ARRAY", - "SHT_PROGBITS", - "SHT_REL", - "SHT_RELA", - "SHT_SHLIB", - "SHT_STRTAB", - "SHT_SYMTAB", - "SHT_SYMTAB_SHNDX", - "STB_GLOBAL", - "STB_HIOS", - "STB_HIPROC", - "STB_LOCAL", - "STB_LOOS", - "STB_LOPROC", - "STB_WEAK", - "STT_COMMON", - "STT_FILE", - "STT_FUNC", - "STT_HIOS", - "STT_HIPROC", - "STT_LOOS", - "STT_LOPROC", - "STT_NOTYPE", - "STT_OBJECT", - "STT_SECTION", - "STT_TLS", - "STV_DEFAULT", - "STV_HIDDEN", - "STV_INTERNAL", - "STV_PROTECTED", - "ST_BIND", - "ST_INFO", - "ST_TYPE", - "ST_VISIBILITY", - "Section", - "Section32", - "Section64", - "SectionFlag", - "SectionHeader", - "SectionIndex", - "SectionType", - "Sym32", - "Sym32Size", - "Sym64", - "Sym64Size", - "SymBind", - "SymType", - "SymVis", - "Symbol", - "Type", - "Version", - }, - "debug/gosym": { - "DecodingError", - "Func", - "LineTable", - "NewLineTable", - "NewTable", - "Obj", - "Sym", - "Table", - "UnknownFileError", - "UnknownLineError", - }, - "debug/macho": { - "ARM64_RELOC_ADDEND", - "ARM64_RELOC_BRANCH26", - "ARM64_RELOC_GOT_LOAD_PAGE21", - "ARM64_RELOC_GOT_LOAD_PAGEOFF12", - "ARM64_RELOC_PAGE21", - "ARM64_RELOC_PAGEOFF12", - "ARM64_RELOC_POINTER_TO_GOT", - "ARM64_RELOC_SUBTRACTOR", - "ARM64_RELOC_TLVP_LOAD_PAGE21", - "ARM64_RELOC_TLVP_LOAD_PAGEOFF12", - "ARM64_RELOC_UNSIGNED", - "ARM_RELOC_BR24", - "ARM_RELOC_HALF", - "ARM_RELOC_HALF_SECTDIFF", - "ARM_RELOC_LOCAL_SECTDIFF", - "ARM_RELOC_PAIR", - "ARM_RELOC_PB_LA_PTR", - "ARM_RELOC_SECTDIFF", - "ARM_RELOC_VANILLA", - "ARM_THUMB_32BIT_BRANCH", - "ARM_THUMB_RELOC_BR22", - "Cpu", - "Cpu386", - "CpuAmd64", - "CpuArm", - "CpuArm64", - "CpuPpc", - "CpuPpc64", - "Dylib", - "DylibCmd", - "Dysymtab", - "DysymtabCmd", - "ErrNotFat", - "FatArch", - "FatArchHeader", - "FatFile", - "File", - "FileHeader", - "FlagAllModsBound", - "FlagAllowStackExecution", - "FlagAppExtensionSafe", - "FlagBindAtLoad", - "FlagBindsToWeak", - "FlagCanonical", - "FlagDeadStrippableDylib", - "FlagDyldLink", - "FlagForceFlat", - "FlagHasTLVDescriptors", - "FlagIncrLink", - "FlagLazyInit", - "FlagNoFixPrebinding", - "FlagNoHeapExecution", - "FlagNoMultiDefs", - "FlagNoReexportedDylibs", - "FlagNoUndefs", - "FlagPIE", - "FlagPrebindable", - "FlagPrebound", - "FlagRootSafe", - "FlagSetuidSafe", - "FlagSplitSegs", - "FlagSubsectionsViaSymbols", - "FlagTwoLevel", - "FlagWeakDefines", - "FormatError", - "GENERIC_RELOC_LOCAL_SECTDIFF", - "GENERIC_RELOC_PAIR", - "GENERIC_RELOC_PB_LA_PTR", - "GENERIC_RELOC_SECTDIFF", - "GENERIC_RELOC_TLV", - "GENERIC_RELOC_VANILLA", - "Load", - "LoadBytes", - "LoadCmd", - "LoadCmdDylib", - "LoadCmdDylinker", - "LoadCmdDysymtab", - "LoadCmdRpath", - "LoadCmdSegment", - "LoadCmdSegment64", - "LoadCmdSymtab", - "LoadCmdThread", - "LoadCmdUnixThread", - "Magic32", - "Magic64", - "MagicFat", - "NewFatFile", - "NewFile", - "Nlist32", - "Nlist64", - "Open", - "OpenFat", - "Regs386", - "RegsAMD64", - "Reloc", - "RelocTypeARM", - "RelocTypeARM64", - "RelocTypeGeneric", - "RelocTypeX86_64", - "Rpath", - "RpathCmd", - "Section", - "Section32", - "Section64", - "SectionHeader", - "Segment", - "Segment32", - "Segment64", - "SegmentHeader", - "Symbol", - "Symtab", - "SymtabCmd", - "Thread", - "Type", - "TypeBundle", - "TypeDylib", - "TypeExec", - "TypeObj", - "X86_64_RELOC_BRANCH", - "X86_64_RELOC_GOT", - "X86_64_RELOC_GOT_LOAD", - "X86_64_RELOC_SIGNED", - "X86_64_RELOC_SIGNED_1", - "X86_64_RELOC_SIGNED_2", - "X86_64_RELOC_SIGNED_4", - 
"X86_64_RELOC_SUBTRACTOR", - "X86_64_RELOC_TLV", - "X86_64_RELOC_UNSIGNED", - }, - "debug/pe": { - "COFFSymbol", - "COFFSymbolAuxFormat5", - "COFFSymbolSize", - "DataDirectory", - "File", - "FileHeader", - "FormatError", - "IMAGE_COMDAT_SELECT_ANY", - "IMAGE_COMDAT_SELECT_ASSOCIATIVE", - "IMAGE_COMDAT_SELECT_EXACT_MATCH", - "IMAGE_COMDAT_SELECT_LARGEST", - "IMAGE_COMDAT_SELECT_NODUPLICATES", - "IMAGE_COMDAT_SELECT_SAME_SIZE", - "IMAGE_DIRECTORY_ENTRY_ARCHITECTURE", - "IMAGE_DIRECTORY_ENTRY_BASERELOC", - "IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT", - "IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR", - "IMAGE_DIRECTORY_ENTRY_DEBUG", - "IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT", - "IMAGE_DIRECTORY_ENTRY_EXCEPTION", - "IMAGE_DIRECTORY_ENTRY_EXPORT", - "IMAGE_DIRECTORY_ENTRY_GLOBALPTR", - "IMAGE_DIRECTORY_ENTRY_IAT", - "IMAGE_DIRECTORY_ENTRY_IMPORT", - "IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG", - "IMAGE_DIRECTORY_ENTRY_RESOURCE", - "IMAGE_DIRECTORY_ENTRY_SECURITY", - "IMAGE_DIRECTORY_ENTRY_TLS", - "IMAGE_DLLCHARACTERISTICS_APPCONTAINER", - "IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE", - "IMAGE_DLLCHARACTERISTICS_FORCE_INTEGRITY", - "IMAGE_DLLCHARACTERISTICS_GUARD_CF", - "IMAGE_DLLCHARACTERISTICS_HIGH_ENTROPY_VA", - "IMAGE_DLLCHARACTERISTICS_NO_BIND", - "IMAGE_DLLCHARACTERISTICS_NO_ISOLATION", - "IMAGE_DLLCHARACTERISTICS_NO_SEH", - "IMAGE_DLLCHARACTERISTICS_NX_COMPAT", - "IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE", - "IMAGE_DLLCHARACTERISTICS_WDM_DRIVER", - "IMAGE_FILE_32BIT_MACHINE", - "IMAGE_FILE_AGGRESIVE_WS_TRIM", - "IMAGE_FILE_BYTES_REVERSED_HI", - "IMAGE_FILE_BYTES_REVERSED_LO", - "IMAGE_FILE_DEBUG_STRIPPED", - "IMAGE_FILE_DLL", - "IMAGE_FILE_EXECUTABLE_IMAGE", - "IMAGE_FILE_LARGE_ADDRESS_AWARE", - "IMAGE_FILE_LINE_NUMS_STRIPPED", - "IMAGE_FILE_LOCAL_SYMS_STRIPPED", - "IMAGE_FILE_MACHINE_AM33", - "IMAGE_FILE_MACHINE_AMD64", - "IMAGE_FILE_MACHINE_ARM", - "IMAGE_FILE_MACHINE_ARM64", - "IMAGE_FILE_MACHINE_ARMNT", - "IMAGE_FILE_MACHINE_EBC", - "IMAGE_FILE_MACHINE_I386", - "IMAGE_FILE_MACHINE_IA64", - "IMAGE_FILE_MACHINE_LOONGARCH32", - "IMAGE_FILE_MACHINE_LOONGARCH64", - "IMAGE_FILE_MACHINE_M32R", - "IMAGE_FILE_MACHINE_MIPS16", - "IMAGE_FILE_MACHINE_MIPSFPU", - "IMAGE_FILE_MACHINE_MIPSFPU16", - "IMAGE_FILE_MACHINE_POWERPC", - "IMAGE_FILE_MACHINE_POWERPCFP", - "IMAGE_FILE_MACHINE_R4000", - "IMAGE_FILE_MACHINE_RISCV128", - "IMAGE_FILE_MACHINE_RISCV32", - "IMAGE_FILE_MACHINE_RISCV64", - "IMAGE_FILE_MACHINE_SH3", - "IMAGE_FILE_MACHINE_SH3DSP", - "IMAGE_FILE_MACHINE_SH4", - "IMAGE_FILE_MACHINE_SH5", - "IMAGE_FILE_MACHINE_THUMB", - "IMAGE_FILE_MACHINE_UNKNOWN", - "IMAGE_FILE_MACHINE_WCEMIPSV2", - "IMAGE_FILE_NET_RUN_FROM_SWAP", - "IMAGE_FILE_RELOCS_STRIPPED", - "IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP", - "IMAGE_FILE_SYSTEM", - "IMAGE_FILE_UP_SYSTEM_ONLY", - "IMAGE_SCN_CNT_CODE", - "IMAGE_SCN_CNT_INITIALIZED_DATA", - "IMAGE_SCN_CNT_UNINITIALIZED_DATA", - "IMAGE_SCN_LNK_COMDAT", - "IMAGE_SCN_MEM_DISCARDABLE", - "IMAGE_SCN_MEM_EXECUTE", - "IMAGE_SCN_MEM_READ", - "IMAGE_SCN_MEM_WRITE", - "IMAGE_SUBSYSTEM_EFI_APPLICATION", - "IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER", - "IMAGE_SUBSYSTEM_EFI_ROM", - "IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER", - "IMAGE_SUBSYSTEM_NATIVE", - "IMAGE_SUBSYSTEM_NATIVE_WINDOWS", - "IMAGE_SUBSYSTEM_OS2_CUI", - "IMAGE_SUBSYSTEM_POSIX_CUI", - "IMAGE_SUBSYSTEM_UNKNOWN", - "IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION", - "IMAGE_SUBSYSTEM_WINDOWS_CE_GUI", - "IMAGE_SUBSYSTEM_WINDOWS_CUI", - "IMAGE_SUBSYSTEM_WINDOWS_GUI", - "IMAGE_SUBSYSTEM_XBOX", - "ImportDirectory", - "NewFile", - "Open", - "OptionalHeader32", - 
"OptionalHeader64", - "Reloc", - "Section", - "SectionHeader", - "SectionHeader32", - "StringTable", - "Symbol", - }, - "debug/plan9obj": { - "ErrNoSymbols", - "File", - "FileHeader", - "Magic386", - "Magic64", - "MagicAMD64", - "MagicARM", - "NewFile", - "Open", - "Section", - "SectionHeader", - "Sym", - }, - "embed": { - "FS", - }, - "encoding": { - "BinaryMarshaler", - "BinaryUnmarshaler", - "TextMarshaler", - "TextUnmarshaler", - }, - "encoding/ascii85": { - "CorruptInputError", - "Decode", - "Encode", - "MaxEncodedLen", - "NewDecoder", - "NewEncoder", - }, - "encoding/asn1": { - "BitString", - "ClassApplication", - "ClassContextSpecific", - "ClassPrivate", - "ClassUniversal", - "Enumerated", - "Flag", - "Marshal", - "MarshalWithParams", - "NullBytes", - "NullRawValue", - "ObjectIdentifier", - "RawContent", - "RawValue", - "StructuralError", - "SyntaxError", - "TagBMPString", - "TagBitString", - "TagBoolean", - "TagEnum", - "TagGeneralString", - "TagGeneralizedTime", - "TagIA5String", - "TagInteger", - "TagNull", - "TagNumericString", - "TagOID", - "TagOctetString", - "TagPrintableString", - "TagSequence", - "TagSet", - "TagT61String", - "TagUTCTime", - "TagUTF8String", - "Unmarshal", - "UnmarshalWithParams", - }, - "encoding/base32": { - "CorruptInputError", - "Encoding", - "HexEncoding", - "NewDecoder", - "NewEncoder", - "NewEncoding", - "NoPadding", - "StdEncoding", - "StdPadding", - }, - "encoding/base64": { - "CorruptInputError", - "Encoding", - "NewDecoder", - "NewEncoder", - "NewEncoding", - "NoPadding", - "RawStdEncoding", - "RawURLEncoding", - "StdEncoding", - "StdPadding", - "URLEncoding", - }, - "encoding/binary": { - "AppendByteOrder", - "AppendUvarint", - "AppendVarint", - "BigEndian", - "ByteOrder", - "LittleEndian", - "MaxVarintLen16", - "MaxVarintLen32", - "MaxVarintLen64", - "NativeEndian", - "PutUvarint", - "PutVarint", - "Read", - "ReadUvarint", - "ReadVarint", - "Size", - "Uvarint", - "Varint", - "Write", - }, - "encoding/csv": { - "ErrBareQuote", - "ErrFieldCount", - "ErrQuote", - "ErrTrailingComma", - "NewReader", - "NewWriter", - "ParseError", - "Reader", - "Writer", - }, - "encoding/gob": { - "CommonType", - "Decoder", - "Encoder", - "GobDecoder", - "GobEncoder", - "NewDecoder", - "NewEncoder", - "Register", - "RegisterName", - }, - "encoding/hex": { - "AppendDecode", - "AppendEncode", - "Decode", - "DecodeString", - "DecodedLen", - "Dump", - "Dumper", - "Encode", - "EncodeToString", - "EncodedLen", - "ErrLength", - "InvalidByteError", - "NewDecoder", - "NewEncoder", - }, - "encoding/json": { - "Compact", - "Decoder", - "Delim", - "Encoder", - "HTMLEscape", - "Indent", - "InvalidUTF8Error", - "InvalidUnmarshalError", - "Marshal", - "MarshalIndent", - "Marshaler", - "MarshalerError", - "NewDecoder", - "NewEncoder", - "Number", - "RawMessage", - "SyntaxError", - "Token", - "Unmarshal", - "UnmarshalFieldError", - "UnmarshalTypeError", - "Unmarshaler", - "UnsupportedTypeError", - "UnsupportedValueError", - "Valid", - }, - "encoding/pem": { - "Block", - "Decode", - "Encode", - "EncodeToMemory", - }, - "encoding/xml": { - "Attr", - "CharData", - "Comment", - "CopyToken", - "Decoder", - "Directive", - "Encoder", - "EndElement", - "Escape", - "EscapeText", - "HTMLAutoClose", - "HTMLEntity", - "Header", - "Marshal", - "MarshalIndent", - "Marshaler", - "MarshalerAttr", - "Name", - "NewDecoder", - "NewEncoder", - "NewTokenDecoder", - "ProcInst", - "StartElement", - "SyntaxError", - "TagPathError", - "Token", - "TokenReader", - "Unmarshal", - "UnmarshalError", - 
"Unmarshaler", - "UnmarshalerAttr", - "UnsupportedTypeError", - }, - "errors": { - "As", - "ErrUnsupported", - "Is", - "Join", - "New", - "Unwrap", - }, - "expvar": { - "Do", - "Float", - "Func", - "Get", - "Handler", - "Int", - "KeyValue", - "Map", - "NewFloat", - "NewInt", - "NewMap", - "NewString", - "Publish", - "String", - "Var", - }, - "flag": { - "Arg", - "Args", - "Bool", - "BoolFunc", - "BoolVar", - "CommandLine", - "ContinueOnError", - "Duration", - "DurationVar", - "ErrHelp", - "ErrorHandling", - "ExitOnError", - "Flag", - "FlagSet", - "Float64", - "Float64Var", - "Func", - "Getter", - "Int", - "Int64", - "Int64Var", - "IntVar", - "Lookup", - "NArg", - "NFlag", - "NewFlagSet", - "PanicOnError", - "Parse", - "Parsed", - "PrintDefaults", - "Set", - "String", - "StringVar", - "TextVar", - "Uint", - "Uint64", - "Uint64Var", - "UintVar", - "UnquoteUsage", - "Usage", - "Value", - "Var", - "Visit", - "VisitAll", - }, - "fmt": { - "Append", - "Appendf", - "Appendln", - "Errorf", - "FormatString", - "Formatter", - "Fprint", - "Fprintf", - "Fprintln", - "Fscan", - "Fscanf", - "Fscanln", - "GoStringer", - "Print", - "Printf", - "Println", - "Scan", - "ScanState", - "Scanf", - "Scanln", - "Scanner", - "Sprint", - "Sprintf", - "Sprintln", - "Sscan", - "Sscanf", - "Sscanln", - "State", - "Stringer", - }, - "go/ast": { - "ArrayType", - "AssignStmt", - "Bad", - "BadDecl", - "BadExpr", - "BadStmt", - "BasicLit", - "BinaryExpr", - "BlockStmt", - "BranchStmt", - "CallExpr", - "CaseClause", - "ChanDir", - "ChanType", - "CommClause", - "Comment", - "CommentGroup", - "CommentMap", - "CompositeLit", - "Con", - "Decl", - "DeclStmt", - "DeferStmt", - "Ellipsis", - "EmptyStmt", - "Expr", - "ExprStmt", - "Field", - "FieldFilter", - "FieldList", - "File", - "FileExports", - "Filter", - "FilterDecl", - "FilterFile", - "FilterFuncDuplicates", - "FilterImportDuplicates", - "FilterPackage", - "FilterUnassociatedComments", - "ForStmt", - "Fprint", - "Fun", - "FuncDecl", - "FuncLit", - "FuncType", - "GenDecl", - "GoStmt", - "Ident", - "IfStmt", - "ImportSpec", - "Importer", - "IncDecStmt", - "IndexExpr", - "IndexListExpr", - "Inspect", - "InterfaceType", - "IsExported", - "IsGenerated", - "KeyValueExpr", - "LabeledStmt", - "Lbl", - "MapType", - "MergeMode", - "MergePackageFiles", - "NewCommentMap", - "NewIdent", - "NewObj", - "NewPackage", - "NewScope", - "Node", - "NotNilFilter", - "ObjKind", - "Object", - "Package", - "PackageExports", - "ParenExpr", - "Pkg", - "Print", - "RECV", - "RangeStmt", - "ReturnStmt", - "SEND", - "Scope", - "SelectStmt", - "SelectorExpr", - "SendStmt", - "SliceExpr", - "SortImports", - "Spec", - "StarExpr", - "Stmt", - "StructType", - "SwitchStmt", - "Typ", - "TypeAssertExpr", - "TypeSpec", - "TypeSwitchStmt", - "UnaryExpr", - "Unparen", - "ValueSpec", - "Var", - "Visitor", - "Walk", - }, - "go/build": { - "AllowBinary", - "ArchChar", - "Context", - "Default", - "Directive", - "FindOnly", - "IgnoreVendor", - "Import", - "ImportComment", - "ImportDir", - "ImportMode", - "IsLocalImport", - "MultiplePackageError", - "NoGoError", - "Package", - "ToolDir", - }, - "go/build/constraint": { - "AndExpr", - "Expr", - "GoVersion", - "IsGoBuild", - "IsPlusBuild", - "NotExpr", - "OrExpr", - "Parse", - "PlusBuildLines", - "SyntaxError", - "TagExpr", - }, - "go/constant": { - "BinaryOp", - "BitLen", - "Bool", - "BoolVal", - "Bytes", - "Compare", - "Complex", - "Denom", - "Float", - "Float32Val", - "Float64Val", - "Imag", - "Int", - "Int64Val", - "Kind", - "Make", - "MakeBool", - "MakeFloat64", - 
"MakeFromBytes", - "MakeFromLiteral", - "MakeImag", - "MakeInt64", - "MakeString", - "MakeUint64", - "MakeUnknown", - "Num", - "Real", - "Shift", - "Sign", - "String", - "StringVal", - "ToComplex", - "ToFloat", - "ToInt", - "Uint64Val", - "UnaryOp", - "Unknown", - "Val", - "Value", - }, - "go/doc": { - "AllDecls", - "AllMethods", - "Example", - "Examples", - "Filter", - "Func", - "IllegalPrefixes", - "IsPredeclared", - "Mode", - "New", - "NewFromFiles", - "Note", - "Package", - "PreserveAST", - "Synopsis", - "ToHTML", - "ToText", - "Type", - "Value", - }, - "go/doc/comment": { - "Block", - "Code", - "DefaultLookupPackage", - "Doc", - "DocLink", - "Heading", - "Italic", - "Link", - "LinkDef", - "List", - "ListItem", - "Paragraph", - "Parser", - "Plain", - "Printer", - "Text", - }, - "go/format": { - "Node", - "Source", - }, - "go/importer": { - "Default", - "For", - "ForCompiler", - "Lookup", - }, - "go/parser": { - "AllErrors", - "DeclarationErrors", - "ImportsOnly", - "Mode", - "PackageClauseOnly", - "ParseComments", - "ParseDir", - "ParseExpr", - "ParseExprFrom", - "ParseFile", - "SkipObjectResolution", - "SpuriousErrors", - "Trace", - }, - "go/printer": { - "CommentedNode", - "Config", - "Fprint", - "Mode", - "RawFormat", - "SourcePos", - "TabIndent", - "UseSpaces", - }, - "go/scanner": { - "Error", - "ErrorHandler", - "ErrorList", - "Mode", - "PrintError", - "ScanComments", - "Scanner", - }, - "go/token": { - "ADD", - "ADD_ASSIGN", - "AND", - "AND_ASSIGN", - "AND_NOT", - "AND_NOT_ASSIGN", - "ARROW", - "ASSIGN", - "BREAK", - "CASE", - "CHAN", - "CHAR", - "COLON", - "COMMA", - "COMMENT", - "CONST", - "CONTINUE", - "DEC", - "DEFAULT", - "DEFER", - "DEFINE", - "ELLIPSIS", - "ELSE", - "EOF", - "EQL", - "FALLTHROUGH", - "FLOAT", - "FOR", - "FUNC", - "File", - "FileSet", - "GEQ", - "GO", - "GOTO", - "GTR", - "HighestPrec", - "IDENT", - "IF", - "ILLEGAL", - "IMAG", - "IMPORT", - "INC", - "INT", - "INTERFACE", - "IsExported", - "IsIdentifier", - "IsKeyword", - "LAND", - "LBRACE", - "LBRACK", - "LEQ", - "LOR", - "LPAREN", - "LSS", - "Lookup", - "LowestPrec", - "MAP", - "MUL", - "MUL_ASSIGN", - "NEQ", - "NOT", - "NewFileSet", - "NoPos", - "OR", - "OR_ASSIGN", - "PACKAGE", - "PERIOD", - "Pos", - "Position", - "QUO", - "QUO_ASSIGN", - "RANGE", - "RBRACE", - "RBRACK", - "REM", - "REM_ASSIGN", - "RETURN", - "RPAREN", - "SELECT", - "SEMICOLON", - "SHL", - "SHL_ASSIGN", - "SHR", - "SHR_ASSIGN", - "STRING", - "STRUCT", - "SUB", - "SUB_ASSIGN", - "SWITCH", - "TILDE", - "TYPE", - "Token", - "UnaryPrec", - "VAR", - "XOR", - "XOR_ASSIGN", - }, - "go/types": { - "Alias", - "ArgumentError", - "Array", - "AssertableTo", - "AssignableTo", - "Basic", - "BasicInfo", - "BasicKind", - "Bool", - "Builtin", - "Byte", - "Chan", - "ChanDir", - "CheckExpr", - "Checker", - "Comparable", - "Complex128", - "Complex64", - "Config", - "Const", - "Context", - "ConvertibleTo", - "DefPredeclaredTestFuncs", - "Default", - "Error", - "Eval", - "ExprString", - "FieldVal", - "Float32", - "Float64", - "Func", - "Id", - "Identical", - "IdenticalIgnoreTags", - "Implements", - "ImportMode", - "Importer", - "ImporterFrom", - "Info", - "Initializer", - "Instance", - "Instantiate", - "Int", - "Int16", - "Int32", - "Int64", - "Int8", - "Interface", - "Invalid", - "IsBoolean", - "IsComplex", - "IsConstType", - "IsFloat", - "IsInteger", - "IsInterface", - "IsNumeric", - "IsOrdered", - "IsString", - "IsUnsigned", - "IsUntyped", - "Label", - "LookupFieldOrMethod", - "Map", - "MethodExpr", - "MethodSet", - "MethodVal", - "MissingMethod", - 
"Named", - "NewAlias", - "NewArray", - "NewChan", - "NewChecker", - "NewConst", - "NewContext", - "NewField", - "NewFunc", - "NewInterface", - "NewInterfaceType", - "NewLabel", - "NewMap", - "NewMethodSet", - "NewNamed", - "NewPackage", - "NewParam", - "NewPkgName", - "NewPointer", - "NewScope", - "NewSignature", - "NewSignatureType", - "NewSlice", - "NewStruct", - "NewTerm", - "NewTuple", - "NewTypeName", - "NewTypeParam", - "NewUnion", - "NewVar", - "Nil", - "Object", - "ObjectString", - "Package", - "PkgName", - "Pointer", - "Qualifier", - "RecvOnly", - "RelativeTo", - "Rune", - "Satisfies", - "Scope", - "Selection", - "SelectionKind", - "SelectionString", - "SendOnly", - "SendRecv", - "Signature", - "Sizes", - "SizesFor", - "Slice", - "StdSizes", - "String", - "Struct", - "Term", - "Tuple", - "Typ", - "Type", - "TypeAndValue", - "TypeList", - "TypeName", - "TypeParam", - "TypeParamList", - "TypeString", - "Uint", - "Uint16", - "Uint32", - "Uint64", - "Uint8", - "Uintptr", - "Unalias", - "Union", - "Universe", - "Unsafe", - "UnsafePointer", - "UntypedBool", - "UntypedComplex", - "UntypedFloat", - "UntypedInt", - "UntypedNil", - "UntypedRune", - "UntypedString", - "Var", - "WriteExpr", - "WriteSignature", - "WriteType", - }, - "go/version": { - "Compare", - "IsValid", - "Lang", - }, - "hash": { - "Hash", - "Hash32", - "Hash64", - }, - "hash/adler32": { - "Checksum", - "New", - "Size", - }, - "hash/crc32": { - "Castagnoli", - "Checksum", - "ChecksumIEEE", - "IEEE", - "IEEETable", - "Koopman", - "MakeTable", - "New", - "NewIEEE", - "Size", - "Table", - "Update", - }, - "hash/crc64": { - "Checksum", - "ECMA", - "ISO", - "MakeTable", - "New", - "Size", - "Table", - "Update", - }, - "hash/fnv": { - "New128", - "New128a", - "New32", - "New32a", - "New64", - "New64a", - }, - "hash/maphash": { - "Bytes", - "Hash", - "MakeSeed", - "Seed", - "String", - }, - "html": { - "EscapeString", - "UnescapeString", - }, - "html/template": { - "CSS", - "ErrAmbigContext", - "ErrBadHTML", - "ErrBranchEnd", - "ErrEndContext", - "ErrJSTemplate", - "ErrNoSuchTemplate", - "ErrOutputContext", - "ErrPartialCharset", - "ErrPartialEscape", - "ErrPredefinedEscaper", - "ErrRangeLoopReentry", - "ErrSlashAmbig", - "Error", - "ErrorCode", - "FuncMap", - "HTML", - "HTMLAttr", - "HTMLEscape", - "HTMLEscapeString", - "HTMLEscaper", - "IsTrue", - "JS", - "JSEscape", - "JSEscapeString", - "JSEscaper", - "JSStr", - "Must", - "New", - "OK", - "ParseFS", - "ParseFiles", - "ParseGlob", - "Srcset", - "Template", - "URL", - "URLQueryEscaper", - }, - "image": { - "Alpha", - "Alpha16", - "Black", - "CMYK", - "Config", - "Decode", - "DecodeConfig", - "ErrFormat", - "Gray", - "Gray16", - "Image", - "NRGBA", - "NRGBA64", - "NYCbCrA", - "NewAlpha", - "NewAlpha16", - "NewCMYK", - "NewGray", - "NewGray16", - "NewNRGBA", - "NewNRGBA64", - "NewNYCbCrA", - "NewPaletted", - "NewRGBA", - "NewRGBA64", - "NewUniform", - "NewYCbCr", - "Opaque", - "Paletted", - "PalettedImage", - "Point", - "Pt", - "RGBA", - "RGBA64", - "RGBA64Image", - "Rect", - "Rectangle", - "RegisterFormat", - "Transparent", - "Uniform", - "White", - "YCbCr", - "YCbCrSubsampleRatio", - "YCbCrSubsampleRatio410", - "YCbCrSubsampleRatio411", - "YCbCrSubsampleRatio420", - "YCbCrSubsampleRatio422", - "YCbCrSubsampleRatio440", - "YCbCrSubsampleRatio444", - "ZP", - "ZR", - }, - "image/color": { - "Alpha", - "Alpha16", - "Alpha16Model", - "AlphaModel", - "Black", - "CMYK", - "CMYKModel", - "CMYKToRGB", - "Color", - "Gray", - "Gray16", - "Gray16Model", - "GrayModel", - "Model", - 
"ModelFunc", - "NRGBA", - "NRGBA64", - "NRGBA64Model", - "NRGBAModel", - "NYCbCrA", - "NYCbCrAModel", - "Opaque", - "Palette", - "RGBA", - "RGBA64", - "RGBA64Model", - "RGBAModel", - "RGBToCMYK", - "RGBToYCbCr", - "Transparent", - "White", - "YCbCr", - "YCbCrModel", - "YCbCrToRGB", - }, - "image/color/palette": { - "Plan9", - "WebSafe", - }, - "image/draw": { - "Draw", - "DrawMask", - "Drawer", - "FloydSteinberg", - "Image", - "Op", - "Over", - "Quantizer", - "RGBA64Image", - "Src", - }, - "image/gif": { - "Decode", - "DecodeAll", - "DecodeConfig", - "DisposalBackground", - "DisposalNone", - "DisposalPrevious", - "Encode", - "EncodeAll", - "GIF", - "Options", - }, - "image/jpeg": { - "Decode", - "DecodeConfig", - "DefaultQuality", - "Encode", - "FormatError", - "Options", - "Reader", - "UnsupportedError", - }, - "image/png": { - "BestCompression", - "BestSpeed", - "CompressionLevel", - "Decode", - "DecodeConfig", - "DefaultCompression", - "Encode", - "Encoder", - "EncoderBuffer", - "EncoderBufferPool", - "FormatError", - "NoCompression", - "UnsupportedError", - }, - "index/suffixarray": { - "Index", - "New", - }, - "io": { - "ByteReader", - "ByteScanner", - "ByteWriter", - "Closer", - "Copy", - "CopyBuffer", - "CopyN", - "Discard", - "EOF", - "ErrClosedPipe", - "ErrNoProgress", - "ErrShortBuffer", - "ErrShortWrite", - "ErrUnexpectedEOF", - "LimitReader", - "LimitedReader", - "MultiReader", - "MultiWriter", - "NewOffsetWriter", - "NewSectionReader", - "NopCloser", - "OffsetWriter", - "Pipe", - "PipeReader", - "PipeWriter", - "ReadAll", - "ReadAtLeast", - "ReadCloser", - "ReadFull", - "ReadSeekCloser", - "ReadSeeker", - "ReadWriteCloser", - "ReadWriteSeeker", - "ReadWriter", - "Reader", - "ReaderAt", - "ReaderFrom", - "RuneReader", - "RuneScanner", - "SectionReader", - "SeekCurrent", - "SeekEnd", - "SeekStart", - "Seeker", - "StringWriter", - "TeeReader", - "WriteCloser", - "WriteSeeker", - "WriteString", - "Writer", - "WriterAt", - "WriterTo", - }, - "io/fs": { - "DirEntry", - "ErrClosed", - "ErrExist", - "ErrInvalid", - "ErrNotExist", - "ErrPermission", - "FS", - "File", - "FileInfo", - "FileInfoToDirEntry", - "FileMode", - "FormatDirEntry", - "FormatFileInfo", - "Glob", - "GlobFS", - "ModeAppend", - "ModeCharDevice", - "ModeDevice", - "ModeDir", - "ModeExclusive", - "ModeIrregular", - "ModeNamedPipe", - "ModePerm", - "ModeSetgid", - "ModeSetuid", - "ModeSocket", - "ModeSticky", - "ModeSymlink", - "ModeTemporary", - "ModeType", - "PathError", - "ReadDir", - "ReadDirFS", - "ReadDirFile", - "ReadFile", - "ReadFileFS", - "SkipAll", - "SkipDir", - "Stat", - "StatFS", - "Sub", - "SubFS", - "ValidPath", - "WalkDir", - "WalkDirFunc", - }, - "io/ioutil": { - "Discard", - "NopCloser", - "ReadAll", - "ReadDir", - "ReadFile", - "TempDir", - "TempFile", - "WriteFile", - }, - "log": { - "Default", - "Fatal", - "Fatalf", - "Fatalln", - "Flags", - "LUTC", - "Ldate", - "Llongfile", - "Lmicroseconds", - "Lmsgprefix", - "Logger", - "Lshortfile", - "LstdFlags", - "Ltime", - "New", - "Output", - "Panic", - "Panicf", - "Panicln", - "Prefix", - "Print", - "Printf", - "Println", - "SetFlags", - "SetOutput", - "SetPrefix", - "Writer", - }, - "log/slog": { - "Any", - "AnyValue", - "Attr", - "Bool", - "BoolValue", - "Debug", - "DebugContext", - "Default", - "Duration", - "DurationValue", - "Error", - "ErrorContext", - "Float64", - "Float64Value", - "Group", - "GroupValue", - "Handler", - "HandlerOptions", - "Info", - "InfoContext", - "Int", - "Int64", - "Int64Value", - "IntValue", - "JSONHandler", - "Kind", - 
"KindAny", - "KindBool", - "KindDuration", - "KindFloat64", - "KindGroup", - "KindInt64", - "KindLogValuer", - "KindString", - "KindTime", - "KindUint64", - "Level", - "LevelDebug", - "LevelError", - "LevelInfo", - "LevelKey", - "LevelVar", - "LevelWarn", - "Leveler", - "Log", - "LogAttrs", - "LogValuer", - "Logger", - "MessageKey", - "New", - "NewJSONHandler", - "NewLogLogger", - "NewRecord", - "NewTextHandler", - "Record", - "SetDefault", - "SetLogLoggerLevel", - "Source", - "SourceKey", - "String", - "StringValue", - "TextHandler", - "Time", - "TimeKey", - "TimeValue", - "Uint64", - "Uint64Value", - "Value", - "Warn", - "WarnContext", - "With", - }, - "log/syslog": { - "Dial", - "LOG_ALERT", - "LOG_AUTH", - "LOG_AUTHPRIV", - "LOG_CRIT", - "LOG_CRON", - "LOG_DAEMON", - "LOG_DEBUG", - "LOG_EMERG", - "LOG_ERR", - "LOG_FTP", - "LOG_INFO", - "LOG_KERN", - "LOG_LOCAL0", - "LOG_LOCAL1", - "LOG_LOCAL2", - "LOG_LOCAL3", - "LOG_LOCAL4", - "LOG_LOCAL5", - "LOG_LOCAL6", - "LOG_LOCAL7", - "LOG_LPR", - "LOG_MAIL", - "LOG_NEWS", - "LOG_NOTICE", - "LOG_SYSLOG", - "LOG_USER", - "LOG_UUCP", - "LOG_WARNING", - "New", - "NewLogger", - "Priority", - "Writer", - }, - "maps": { - "Clone", - "Copy", - "DeleteFunc", - "Equal", - "EqualFunc", - }, - "math": { - "Abs", - "Acos", - "Acosh", - "Asin", - "Asinh", - "Atan", - "Atan2", - "Atanh", - "Cbrt", - "Ceil", - "Copysign", - "Cos", - "Cosh", - "Dim", - "E", - "Erf", - "Erfc", - "Erfcinv", - "Erfinv", - "Exp", - "Exp2", - "Expm1", - "FMA", - "Float32bits", - "Float32frombits", - "Float64bits", - "Float64frombits", - "Floor", - "Frexp", - "Gamma", - "Hypot", - "Ilogb", - "Inf", - "IsInf", - "IsNaN", - "J0", - "J1", - "Jn", - "Ldexp", - "Lgamma", - "Ln10", - "Ln2", - "Log", - "Log10", - "Log10E", - "Log1p", - "Log2", - "Log2E", - "Logb", - "Max", - "MaxFloat32", - "MaxFloat64", - "MaxInt", - "MaxInt16", - "MaxInt32", - "MaxInt64", - "MaxInt8", - "MaxUint", - "MaxUint16", - "MaxUint32", - "MaxUint64", - "MaxUint8", - "Min", - "MinInt", - "MinInt16", - "MinInt32", - "MinInt64", - "MinInt8", - "Mod", - "Modf", - "NaN", - "Nextafter", - "Nextafter32", - "Phi", - "Pi", - "Pow", - "Pow10", - "Remainder", - "Round", - "RoundToEven", - "Signbit", - "Sin", - "Sincos", - "Sinh", - "SmallestNonzeroFloat32", - "SmallestNonzeroFloat64", - "Sqrt", - "Sqrt2", - "SqrtE", - "SqrtPhi", - "SqrtPi", - "Tan", - "Tanh", - "Trunc", - "Y0", - "Y1", - "Yn", - }, - "math/big": { - "Above", - "Accuracy", - "AwayFromZero", - "Below", - "ErrNaN", - "Exact", - "Float", - "Int", - "Jacobi", - "MaxBase", - "MaxExp", - "MaxPrec", - "MinExp", - "NewFloat", - "NewInt", - "NewRat", - "ParseFloat", - "Rat", - "RoundingMode", - "ToNearestAway", - "ToNearestEven", - "ToNegativeInf", - "ToPositiveInf", - "ToZero", - "Word", - }, - "math/bits": { - "Add", - "Add32", - "Add64", - "Div", - "Div32", - "Div64", - "LeadingZeros", - "LeadingZeros16", - "LeadingZeros32", - "LeadingZeros64", - "LeadingZeros8", - "Len", - "Len16", - "Len32", - "Len64", - "Len8", - "Mul", - "Mul32", - "Mul64", - "OnesCount", - "OnesCount16", - "OnesCount32", - "OnesCount64", - "OnesCount8", - "Rem", - "Rem32", - "Rem64", - "Reverse", - "Reverse16", - "Reverse32", - "Reverse64", - "Reverse8", - "ReverseBytes", - "ReverseBytes16", - "ReverseBytes32", - "ReverseBytes64", - "RotateLeft", - "RotateLeft16", - "RotateLeft32", - "RotateLeft64", - "RotateLeft8", - "Sub", - "Sub32", - "Sub64", - "TrailingZeros", - "TrailingZeros16", - "TrailingZeros32", - "TrailingZeros64", - "TrailingZeros8", - "UintSize", - }, - "math/cmplx": { - "Abs", - 
"Acos", - "Acosh", - "Asin", - "Asinh", - "Atan", - "Atanh", - "Conj", - "Cos", - "Cosh", - "Cot", - "Exp", - "Inf", - "IsInf", - "IsNaN", - "Log", - "Log10", - "NaN", - "Phase", - "Polar", - "Pow", - "Rect", - "Sin", - "Sinh", - "Sqrt", - "Tan", - "Tanh", - }, - "math/rand": { - "ExpFloat64", - "Float32", - "Float64", - "Int", - "Int31", - "Int31n", - "Int63", - "Int63n", - "Intn", - "New", - "NewSource", - "NewZipf", - "NormFloat64", - "Perm", - "Rand", - "Read", - "Seed", - "Shuffle", - "Source", - "Source64", - "Uint32", - "Uint64", - "Zipf", - }, - "math/rand/v2": { - "ChaCha8", - "ExpFloat64", - "Float32", - "Float64", - "Int", - "Int32", - "Int32N", - "Int64", - "Int64N", - "IntN", - "N", - "New", - "NewChaCha8", - "NewPCG", - "NewZipf", - "NormFloat64", - "PCG", - "Perm", - "Rand", - "Shuffle", - "Source", - "Uint32", - "Uint32N", - "Uint64", - "Uint64N", - "UintN", - "Zipf", - }, - "mime": { - "AddExtensionType", - "BEncoding", - "ErrInvalidMediaParameter", - "ExtensionsByType", - "FormatMediaType", - "ParseMediaType", - "QEncoding", - "TypeByExtension", - "WordDecoder", - "WordEncoder", - }, - "mime/multipart": { - "ErrMessageTooLarge", - "File", - "FileHeader", - "Form", - "NewReader", - "NewWriter", - "Part", - "Reader", - "Writer", - }, - "mime/quotedprintable": { - "NewReader", - "NewWriter", - "Reader", - "Writer", - }, - "net": { - "Addr", - "AddrError", - "Buffers", - "CIDRMask", - "Conn", - "DNSConfigError", - "DNSError", - "DefaultResolver", - "Dial", - "DialIP", - "DialTCP", - "DialTimeout", - "DialUDP", - "DialUnix", - "Dialer", - "ErrClosed", - "ErrWriteToConnected", - "Error", - "FileConn", - "FileListener", - "FilePacketConn", - "FlagBroadcast", - "FlagLoopback", - "FlagMulticast", - "FlagPointToPoint", - "FlagRunning", - "FlagUp", - "Flags", - "HardwareAddr", - "IP", - "IPAddr", - "IPConn", - "IPMask", - "IPNet", - "IPv4", - "IPv4Mask", - "IPv4allrouter", - "IPv4allsys", - "IPv4bcast", - "IPv4len", - "IPv4zero", - "IPv6interfacelocalallnodes", - "IPv6len", - "IPv6linklocalallnodes", - "IPv6linklocalallrouters", - "IPv6loopback", - "IPv6unspecified", - "IPv6zero", - "Interface", - "InterfaceAddrs", - "InterfaceByIndex", - "InterfaceByName", - "Interfaces", - "InvalidAddrError", - "JoinHostPort", - "Listen", - "ListenConfig", - "ListenIP", - "ListenMulticastUDP", - "ListenPacket", - "ListenTCP", - "ListenUDP", - "ListenUnix", - "ListenUnixgram", - "Listener", - "LookupAddr", - "LookupCNAME", - "LookupHost", - "LookupIP", - "LookupMX", - "LookupNS", - "LookupPort", - "LookupSRV", - "LookupTXT", - "MX", - "NS", - "OpError", - "PacketConn", - "ParseCIDR", - "ParseError", - "ParseIP", - "ParseMAC", - "Pipe", - "ResolveIPAddr", - "ResolveTCPAddr", - "ResolveUDPAddr", - "ResolveUnixAddr", - "Resolver", - "SRV", - "SplitHostPort", - "TCPAddr", - "TCPAddrFromAddrPort", - "TCPConn", - "TCPListener", - "UDPAddr", - "UDPAddrFromAddrPort", - "UDPConn", - "UnixAddr", - "UnixConn", - "UnixListener", - "UnknownNetworkError", - }, - "net/http": { - "AllowQuerySemicolons", - "CanonicalHeaderKey", - "Client", - "CloseNotifier", - "ConnState", - "Cookie", - "CookieJar", - "DefaultClient", - "DefaultMaxHeaderBytes", - "DefaultMaxIdleConnsPerHost", - "DefaultServeMux", - "DefaultTransport", - "DetectContentType", - "Dir", - "ErrAbortHandler", - "ErrBodyNotAllowed", - "ErrBodyReadAfterClose", - "ErrContentLength", - "ErrHandlerTimeout", - "ErrHeaderTooLong", - "ErrHijacked", - "ErrLineTooLong", - "ErrMissingBoundary", - "ErrMissingContentLength", - "ErrMissingFile", - "ErrNoCookie", - 
"ErrNoLocation", - "ErrNotMultipart", - "ErrNotSupported", - "ErrSchemeMismatch", - "ErrServerClosed", - "ErrShortBody", - "ErrSkipAltProtocol", - "ErrUnexpectedTrailer", - "ErrUseLastResponse", - "ErrWriteAfterFlush", - "Error", - "FS", - "File", - "FileServer", - "FileServerFS", - "FileSystem", - "Flusher", - "Get", - "Handle", - "HandleFunc", - "Handler", - "HandlerFunc", - "Head", - "Header", - "Hijacker", - "ListenAndServe", - "ListenAndServeTLS", - "LocalAddrContextKey", - "MaxBytesError", - "MaxBytesHandler", - "MaxBytesReader", - "MethodConnect", - "MethodDelete", - "MethodGet", - "MethodHead", - "MethodOptions", - "MethodPatch", - "MethodPost", - "MethodPut", - "MethodTrace", - "NewFileTransport", - "NewFileTransportFS", - "NewRequest", - "NewRequestWithContext", - "NewResponseController", - "NewServeMux", - "NoBody", - "NotFound", - "NotFoundHandler", - "ParseHTTPVersion", - "ParseTime", - "Post", - "PostForm", - "ProtocolError", - "ProxyFromEnvironment", - "ProxyURL", - "PushOptions", - "Pusher", - "ReadRequest", - "ReadResponse", - "Redirect", - "RedirectHandler", - "Request", - "Response", - "ResponseController", - "ResponseWriter", - "RoundTripper", - "SameSite", - "SameSiteDefaultMode", - "SameSiteLaxMode", - "SameSiteNoneMode", - "SameSiteStrictMode", - "Serve", - "ServeContent", - "ServeFile", - "ServeFileFS", - "ServeMux", - "ServeTLS", - "Server", - "ServerContextKey", - "SetCookie", - "StateActive", - "StateClosed", - "StateHijacked", - "StateIdle", - "StateNew", - "StatusAccepted", - "StatusAlreadyReported", - "StatusBadGateway", - "StatusBadRequest", - "StatusConflict", - "StatusContinue", - "StatusCreated", - "StatusEarlyHints", - "StatusExpectationFailed", - "StatusFailedDependency", - "StatusForbidden", - "StatusFound", - "StatusGatewayTimeout", - "StatusGone", - "StatusHTTPVersionNotSupported", - "StatusIMUsed", - "StatusInsufficientStorage", - "StatusInternalServerError", - "StatusLengthRequired", - "StatusLocked", - "StatusLoopDetected", - "StatusMethodNotAllowed", - "StatusMisdirectedRequest", - "StatusMovedPermanently", - "StatusMultiStatus", - "StatusMultipleChoices", - "StatusNetworkAuthenticationRequired", - "StatusNoContent", - "StatusNonAuthoritativeInfo", - "StatusNotAcceptable", - "StatusNotExtended", - "StatusNotFound", - "StatusNotImplemented", - "StatusNotModified", - "StatusOK", - "StatusPartialContent", - "StatusPaymentRequired", - "StatusPermanentRedirect", - "StatusPreconditionFailed", - "StatusPreconditionRequired", - "StatusProcessing", - "StatusProxyAuthRequired", - "StatusRequestEntityTooLarge", - "StatusRequestHeaderFieldsTooLarge", - "StatusRequestTimeout", - "StatusRequestURITooLong", - "StatusRequestedRangeNotSatisfiable", - "StatusResetContent", - "StatusSeeOther", - "StatusServiceUnavailable", - "StatusSwitchingProtocols", - "StatusTeapot", - "StatusTemporaryRedirect", - "StatusText", - "StatusTooEarly", - "StatusTooManyRequests", - "StatusUnauthorized", - "StatusUnavailableForLegalReasons", - "StatusUnprocessableEntity", - "StatusUnsupportedMediaType", - "StatusUpgradeRequired", - "StatusUseProxy", - "StatusVariantAlsoNegotiates", - "StripPrefix", - "TimeFormat", - "TimeoutHandler", - "TrailerPrefix", - "Transport", - }, - "net/http/cgi": { - "Handler", - "Request", - "RequestFromMap", - "Serve", - }, - "net/http/cookiejar": { - "Jar", - "New", - "Options", - "PublicSuffixList", - }, - "net/http/fcgi": { - "ErrConnClosed", - "ErrRequestAborted", - "ProcessEnv", - "Serve", - }, - "net/http/httptest": { - "DefaultRemoteAddr", - 
"NewRecorder", - "NewRequest", - "NewServer", - "NewTLSServer", - "NewUnstartedServer", - "ResponseRecorder", - "Server", - }, - "net/http/httptrace": { - "ClientTrace", - "ContextClientTrace", - "DNSDoneInfo", - "DNSStartInfo", - "GotConnInfo", - "WithClientTrace", - "WroteRequestInfo", - }, - "net/http/httputil": { - "BufferPool", - "ClientConn", - "DumpRequest", - "DumpRequestOut", - "DumpResponse", - "ErrClosed", - "ErrLineTooLong", - "ErrPersistEOF", - "ErrPipeline", - "NewChunkedReader", - "NewChunkedWriter", - "NewClientConn", - "NewProxyClientConn", - "NewServerConn", - "NewSingleHostReverseProxy", - "ProxyRequest", - "ReverseProxy", - "ServerConn", - }, - "net/http/pprof": { - "Cmdline", - "Handler", - "Index", - "Profile", - "Symbol", - "Trace", - }, - "net/mail": { - "Address", - "AddressParser", - "ErrHeaderNotPresent", - "Header", - "Message", - "ParseAddress", - "ParseAddressList", - "ParseDate", - "ReadMessage", - }, - "net/netip": { - "Addr", - "AddrFrom16", - "AddrFrom4", - "AddrFromSlice", - "AddrPort", - "AddrPortFrom", - "IPv4Unspecified", - "IPv6LinkLocalAllNodes", - "IPv6LinkLocalAllRouters", - "IPv6Loopback", - "IPv6Unspecified", - "MustParseAddr", - "MustParseAddrPort", - "MustParsePrefix", - "ParseAddr", - "ParseAddrPort", - "ParsePrefix", - "Prefix", - "PrefixFrom", - }, - "net/rpc": { - "Accept", - "Call", - "Client", - "ClientCodec", - "DefaultDebugPath", - "DefaultRPCPath", - "DefaultServer", - "Dial", - "DialHTTP", - "DialHTTPPath", - "ErrShutdown", - "HandleHTTP", - "NewClient", - "NewClientWithCodec", - "NewServer", - "Register", - "RegisterName", - "Request", - "Response", - "ServeCodec", - "ServeConn", - "ServeRequest", - "Server", - "ServerCodec", - "ServerError", - }, - "net/rpc/jsonrpc": { - "Dial", - "NewClient", - "NewClientCodec", - "NewServerCodec", - "ServeConn", - }, - "net/smtp": { - "Auth", - "CRAMMD5Auth", - "Client", - "Dial", - "NewClient", - "PlainAuth", - "SendMail", - "ServerInfo", - }, - "net/textproto": { - "CanonicalMIMEHeaderKey", - "Conn", - "Dial", - "Error", - "MIMEHeader", - "NewConn", - "NewReader", - "NewWriter", - "Pipeline", - "ProtocolError", - "Reader", - "TrimBytes", - "TrimString", - "Writer", - }, - "net/url": { - "Error", - "EscapeError", - "InvalidHostError", - "JoinPath", - "Parse", - "ParseQuery", - "ParseRequestURI", - "PathEscape", - "PathUnescape", - "QueryEscape", - "QueryUnescape", - "URL", - "User", - "UserPassword", - "Userinfo", - "Values", - }, - "os": { - "Args", - "Chdir", - "Chmod", - "Chown", - "Chtimes", - "Clearenv", - "Create", - "CreateTemp", - "DevNull", - "DirEntry", - "DirFS", - "Environ", - "ErrClosed", - "ErrDeadlineExceeded", - "ErrExist", - "ErrInvalid", - "ErrNoDeadline", - "ErrNotExist", - "ErrPermission", - "ErrProcessDone", - "Executable", - "Exit", - "Expand", - "ExpandEnv", - "File", - "FileInfo", - "FileMode", - "FindProcess", - "Getegid", - "Getenv", - "Geteuid", - "Getgid", - "Getgroups", - "Getpagesize", - "Getpid", - "Getppid", - "Getuid", - "Getwd", - "Hostname", - "Interrupt", - "IsExist", - "IsNotExist", - "IsPathSeparator", - "IsPermission", - "IsTimeout", - "Kill", - "Lchown", - "Link", - "LinkError", - "LookupEnv", - "Lstat", - "Mkdir", - "MkdirAll", - "MkdirTemp", - "ModeAppend", - "ModeCharDevice", - "ModeDevice", - "ModeDir", - "ModeExclusive", - "ModeIrregular", - "ModeNamedPipe", - "ModePerm", - "ModeSetgid", - "ModeSetuid", - "ModeSocket", - "ModeSticky", - "ModeSymlink", - "ModeTemporary", - "ModeType", - "NewFile", - "NewSyscallError", - "O_APPEND", - "O_CREATE", - 
"O_EXCL", - "O_RDONLY", - "O_RDWR", - "O_SYNC", - "O_TRUNC", - "O_WRONLY", - "Open", - "OpenFile", - "PathError", - "PathListSeparator", - "PathSeparator", - "Pipe", - "ProcAttr", - "Process", - "ProcessState", - "ReadDir", - "ReadFile", - "Readlink", - "Remove", - "RemoveAll", - "Rename", - "SEEK_CUR", - "SEEK_END", - "SEEK_SET", - "SameFile", - "Setenv", - "Signal", - "StartProcess", - "Stat", - "Stderr", - "Stdin", - "Stdout", - "Symlink", - "SyscallError", - "TempDir", - "Truncate", - "Unsetenv", - "UserCacheDir", - "UserConfigDir", - "UserHomeDir", - "WriteFile", - }, - "os/exec": { - "Cmd", - "Command", - "CommandContext", - "ErrDot", - "ErrNotFound", - "ErrWaitDelay", - "Error", - "ExitError", - "LookPath", - }, - "os/signal": { - "Ignore", - "Ignored", - "Notify", - "NotifyContext", - "Reset", - "Stop", - }, - "os/user": { - "Current", - "Group", - "Lookup", - "LookupGroup", - "LookupGroupId", - "LookupId", - "UnknownGroupError", - "UnknownGroupIdError", - "UnknownUserError", - "UnknownUserIdError", - "User", - }, - "path": { - "Base", - "Clean", - "Dir", - "ErrBadPattern", - "Ext", - "IsAbs", - "Join", - "Match", - "Split", - }, - "path/filepath": { - "Abs", - "Base", - "Clean", - "Dir", - "ErrBadPattern", - "EvalSymlinks", - "Ext", - "FromSlash", - "Glob", - "HasPrefix", - "IsAbs", - "IsLocal", - "Join", - "ListSeparator", - "Match", - "Rel", - "Separator", - "SkipAll", - "SkipDir", - "Split", - "SplitList", - "ToSlash", - "VolumeName", - "Walk", - "WalkDir", - "WalkFunc", - }, - "plugin": { - "Open", - "Plugin", - "Symbol", - }, - "reflect": { - "Append", - "AppendSlice", - "Array", - "ArrayOf", - "Bool", - "BothDir", - "Chan", - "ChanDir", - "ChanOf", - "Complex128", - "Complex64", - "Copy", - "DeepEqual", - "Float32", - "Float64", - "Func", - "FuncOf", - "Indirect", - "Int", - "Int16", - "Int32", - "Int64", - "Int8", - "Interface", - "Invalid", - "Kind", - "MakeChan", - "MakeFunc", - "MakeMap", - "MakeMapWithSize", - "MakeSlice", - "Map", - "MapIter", - "MapOf", - "Method", - "New", - "NewAt", - "Pointer", - "PointerTo", - "Ptr", - "PtrTo", - "RecvDir", - "Select", - "SelectCase", - "SelectDefault", - "SelectDir", - "SelectRecv", - "SelectSend", - "SendDir", - "Slice", - "SliceHeader", - "SliceOf", - "String", - "StringHeader", - "Struct", - "StructField", - "StructOf", - "StructTag", - "Swapper", - "Type", - "TypeFor", - "TypeOf", - "Uint", - "Uint16", - "Uint32", - "Uint64", - "Uint8", - "Uintptr", - "UnsafePointer", - "Value", - "ValueError", - "ValueOf", - "VisibleFields", - "Zero", - }, - "regexp": { - "Compile", - "CompilePOSIX", - "Match", - "MatchReader", - "MatchString", - "MustCompile", - "MustCompilePOSIX", - "QuoteMeta", - "Regexp", - }, - "regexp/syntax": { - "ClassNL", - "Compile", - "DotNL", - "EmptyBeginLine", - "EmptyBeginText", - "EmptyEndLine", - "EmptyEndText", - "EmptyNoWordBoundary", - "EmptyOp", - "EmptyOpContext", - "EmptyWordBoundary", - "ErrInternalError", - "ErrInvalidCharClass", - "ErrInvalidCharRange", - "ErrInvalidEscape", - "ErrInvalidNamedCapture", - "ErrInvalidPerlOp", - "ErrInvalidRepeatOp", - "ErrInvalidRepeatSize", - "ErrInvalidUTF8", - "ErrLarge", - "ErrMissingBracket", - "ErrMissingParen", - "ErrMissingRepeatArgument", - "ErrNestingDepth", - "ErrTrailingBackslash", - "ErrUnexpectedParen", - "Error", - "ErrorCode", - "Flags", - "FoldCase", - "Inst", - "InstAlt", - "InstAltMatch", - "InstCapture", - "InstEmptyWidth", - "InstFail", - "InstMatch", - "InstNop", - "InstOp", - "InstRune", - "InstRune1", - "InstRuneAny", - "InstRuneAnyNotNL", - 
"IsWordChar", - "Literal", - "MatchNL", - "NonGreedy", - "OneLine", - "Op", - "OpAlternate", - "OpAnyChar", - "OpAnyCharNotNL", - "OpBeginLine", - "OpBeginText", - "OpCapture", - "OpCharClass", - "OpConcat", - "OpEmptyMatch", - "OpEndLine", - "OpEndText", - "OpLiteral", - "OpNoMatch", - "OpNoWordBoundary", - "OpPlus", - "OpQuest", - "OpRepeat", - "OpStar", - "OpWordBoundary", - "POSIX", - "Parse", - "Perl", - "PerlX", - "Prog", - "Regexp", - "Simple", - "UnicodeGroups", - "WasDollar", - }, - "runtime": { - "BlockProfile", - "BlockProfileRecord", - "Breakpoint", - "CPUProfile", - "Caller", - "Callers", - "CallersFrames", - "Compiler", - "Error", - "Frame", - "Frames", - "Func", - "FuncForPC", - "GC", - "GOARCH", - "GOMAXPROCS", - "GOOS", - "GOROOT", - "Goexit", - "GoroutineProfile", - "Gosched", - "KeepAlive", - "LockOSThread", - "MemProfile", - "MemProfileRate", - "MemProfileRecord", - "MemStats", - "MutexProfile", - "NumCPU", - "NumCgoCall", - "NumGoroutine", - "PanicNilError", - "Pinner", - "ReadMemStats", - "ReadTrace", - "SetBlockProfileRate", - "SetCPUProfileRate", - "SetCgoTraceback", - "SetFinalizer", - "SetMutexProfileFraction", - "Stack", - "StackRecord", - "StartTrace", - "StopTrace", - "ThreadCreateProfile", - "TypeAssertionError", - "UnlockOSThread", - "Version", - }, - "runtime/cgo": { - "Handle", - "Incomplete", - "NewHandle", - }, - "runtime/coverage": { - "ClearCounters", - "WriteCounters", - "WriteCountersDir", - "WriteMeta", - "WriteMetaDir", - }, - "runtime/debug": { - "BuildInfo", - "BuildSetting", - "FreeOSMemory", - "GCStats", - "Module", - "ParseBuildInfo", - "PrintStack", - "ReadBuildInfo", - "ReadGCStats", - "SetGCPercent", - "SetMaxStack", - "SetMaxThreads", - "SetMemoryLimit", - "SetPanicOnFault", - "SetTraceback", - "Stack", - "WriteHeapDump", - }, - "runtime/metrics": { - "All", - "Description", - "Float64Histogram", - "KindBad", - "KindFloat64", - "KindFloat64Histogram", - "KindUint64", - "Read", - "Sample", - "Value", - "ValueKind", - }, - "runtime/pprof": { - "Do", - "ForLabels", - "Label", - "LabelSet", - "Labels", - "Lookup", - "NewProfile", - "Profile", - "Profiles", - "SetGoroutineLabels", - "StartCPUProfile", - "StopCPUProfile", - "WithLabels", - "WriteHeapProfile", - }, - "runtime/trace": { - "IsEnabled", - "Log", - "Logf", - "NewTask", - "Region", - "Start", - "StartRegion", - "Stop", - "Task", - "WithRegion", - }, - "slices": { - "BinarySearch", - "BinarySearchFunc", - "Clip", - "Clone", - "Compact", - "CompactFunc", - "Compare", - "CompareFunc", - "Concat", - "Contains", - "ContainsFunc", - "Delete", - "DeleteFunc", - "Equal", - "EqualFunc", - "Grow", - "Index", - "IndexFunc", - "Insert", - "IsSorted", - "IsSortedFunc", - "Max", - "MaxFunc", - "Min", - "MinFunc", - "Replace", - "Reverse", - "Sort", - "SortFunc", - "SortStableFunc", - }, - "sort": { - "Find", - "Float64Slice", - "Float64s", - "Float64sAreSorted", - "IntSlice", - "Interface", - "Ints", - "IntsAreSorted", - "IsSorted", - "Reverse", - "Search", - "SearchFloat64s", - "SearchInts", - "SearchStrings", - "Slice", - "SliceIsSorted", - "SliceStable", - "Sort", - "Stable", - "StringSlice", - "Strings", - "StringsAreSorted", - }, - "strconv": { - "AppendBool", - "AppendFloat", - "AppendInt", - "AppendQuote", - "AppendQuoteRune", - "AppendQuoteRuneToASCII", - "AppendQuoteRuneToGraphic", - "AppendQuoteToASCII", - "AppendQuoteToGraphic", - "AppendUint", - "Atoi", - "CanBackquote", - "ErrRange", - "ErrSyntax", - "FormatBool", - "FormatComplex", - "FormatFloat", - "FormatInt", - "FormatUint", - 
"IntSize", - "IsGraphic", - "IsPrint", - "Itoa", - "NumError", - "ParseBool", - "ParseComplex", - "ParseFloat", - "ParseInt", - "ParseUint", - "Quote", - "QuoteRune", - "QuoteRuneToASCII", - "QuoteRuneToGraphic", - "QuoteToASCII", - "QuoteToGraphic", - "QuotedPrefix", - "Unquote", - "UnquoteChar", - }, - "strings": { - "Builder", - "Clone", - "Compare", - "Contains", - "ContainsAny", - "ContainsFunc", - "ContainsRune", - "Count", - "Cut", - "CutPrefix", - "CutSuffix", - "EqualFold", - "Fields", - "FieldsFunc", - "HasPrefix", - "HasSuffix", - "Index", - "IndexAny", - "IndexByte", - "IndexFunc", - "IndexRune", - "Join", - "LastIndex", - "LastIndexAny", - "LastIndexByte", - "LastIndexFunc", - "Map", - "NewReader", - "NewReplacer", - "Reader", - "Repeat", - "Replace", - "ReplaceAll", - "Replacer", - "Split", - "SplitAfter", - "SplitAfterN", - "SplitN", - "Title", - "ToLower", - "ToLowerSpecial", - "ToTitle", - "ToTitleSpecial", - "ToUpper", - "ToUpperSpecial", - "ToValidUTF8", - "Trim", - "TrimFunc", - "TrimLeft", - "TrimLeftFunc", - "TrimPrefix", - "TrimRight", - "TrimRightFunc", - "TrimSpace", - "TrimSuffix", - }, - "sync": { - "Cond", - "Locker", - "Map", - "Mutex", - "NewCond", - "Once", - "OnceFunc", - "OnceValue", - "OnceValues", - "Pool", - "RWMutex", - "WaitGroup", - }, - "sync/atomic": { - "AddInt32", - "AddInt64", - "AddUint32", - "AddUint64", - "AddUintptr", - "Bool", - "CompareAndSwapInt32", - "CompareAndSwapInt64", - "CompareAndSwapPointer", - "CompareAndSwapUint32", - "CompareAndSwapUint64", - "CompareAndSwapUintptr", - "Int32", - "Int64", - "LoadInt32", - "LoadInt64", - "LoadPointer", - "LoadUint32", - "LoadUint64", - "LoadUintptr", - "Pointer", - "StoreInt32", - "StoreInt64", - "StorePointer", - "StoreUint32", - "StoreUint64", - "StoreUintptr", - "SwapInt32", - "SwapInt64", - "SwapPointer", - "SwapUint32", - "SwapUint64", - "SwapUintptr", - "Uint32", - "Uint64", - "Uintptr", - "Value", - }, - "syscall": { - "AF_ALG", - "AF_APPLETALK", - "AF_ARP", - "AF_ASH", - "AF_ATM", - "AF_ATMPVC", - "AF_ATMSVC", - "AF_AX25", - "AF_BLUETOOTH", - "AF_BRIDGE", - "AF_CAIF", - "AF_CAN", - "AF_CCITT", - "AF_CHAOS", - "AF_CNT", - "AF_COIP", - "AF_DATAKIT", - "AF_DECnet", - "AF_DLI", - "AF_E164", - "AF_ECMA", - "AF_ECONET", - "AF_ENCAP", - "AF_FILE", - "AF_HYLINK", - "AF_IEEE80211", - "AF_IEEE802154", - "AF_IMPLINK", - "AF_INET", - "AF_INET6", - "AF_INET6_SDP", - "AF_INET_SDP", - "AF_IPX", - "AF_IRDA", - "AF_ISDN", - "AF_ISO", - "AF_IUCV", - "AF_KEY", - "AF_LAT", - "AF_LINK", - "AF_LLC", - "AF_LOCAL", - "AF_MAX", - "AF_MPLS", - "AF_NATM", - "AF_NDRV", - "AF_NETBEUI", - "AF_NETBIOS", - "AF_NETGRAPH", - "AF_NETLINK", - "AF_NETROM", - "AF_NS", - "AF_OROUTE", - "AF_OSI", - "AF_PACKET", - "AF_PHONET", - "AF_PPP", - "AF_PPPOX", - "AF_PUP", - "AF_RDS", - "AF_RESERVED_36", - "AF_ROSE", - "AF_ROUTE", - "AF_RXRPC", - "AF_SCLUSTER", - "AF_SECURITY", - "AF_SIP", - "AF_SLOW", - "AF_SNA", - "AF_SYSTEM", - "AF_TIPC", - "AF_UNIX", - "AF_UNSPEC", - "AF_UTUN", - "AF_VENDOR00", - "AF_VENDOR01", - "AF_VENDOR02", - "AF_VENDOR03", - "AF_VENDOR04", - "AF_VENDOR05", - "AF_VENDOR06", - "AF_VENDOR07", - "AF_VENDOR08", - "AF_VENDOR09", - "AF_VENDOR10", - "AF_VENDOR11", - "AF_VENDOR12", - "AF_VENDOR13", - "AF_VENDOR14", - "AF_VENDOR15", - "AF_VENDOR16", - "AF_VENDOR17", - "AF_VENDOR18", - "AF_VENDOR19", - "AF_VENDOR20", - "AF_VENDOR21", - "AF_VENDOR22", - "AF_VENDOR23", - "AF_VENDOR24", - "AF_VENDOR25", - "AF_VENDOR26", - "AF_VENDOR27", - "AF_VENDOR28", - "AF_VENDOR29", - "AF_VENDOR30", - "AF_VENDOR31", - "AF_VENDOR32", - 
"AF_VENDOR33", - "AF_VENDOR34", - "AF_VENDOR35", - "AF_VENDOR36", - "AF_VENDOR37", - "AF_VENDOR38", - "AF_VENDOR39", - "AF_VENDOR40", - "AF_VENDOR41", - "AF_VENDOR42", - "AF_VENDOR43", - "AF_VENDOR44", - "AF_VENDOR45", - "AF_VENDOR46", - "AF_VENDOR47", - "AF_WANPIPE", - "AF_X25", - "AI_CANONNAME", - "AI_NUMERICHOST", - "AI_PASSIVE", - "APPLICATION_ERROR", - "ARPHRD_ADAPT", - "ARPHRD_APPLETLK", - "ARPHRD_ARCNET", - "ARPHRD_ASH", - "ARPHRD_ATM", - "ARPHRD_AX25", - "ARPHRD_BIF", - "ARPHRD_CHAOS", - "ARPHRD_CISCO", - "ARPHRD_CSLIP", - "ARPHRD_CSLIP6", - "ARPHRD_DDCMP", - "ARPHRD_DLCI", - "ARPHRD_ECONET", - "ARPHRD_EETHER", - "ARPHRD_ETHER", - "ARPHRD_EUI64", - "ARPHRD_FCAL", - "ARPHRD_FCFABRIC", - "ARPHRD_FCPL", - "ARPHRD_FCPP", - "ARPHRD_FDDI", - "ARPHRD_FRAD", - "ARPHRD_FRELAY", - "ARPHRD_HDLC", - "ARPHRD_HIPPI", - "ARPHRD_HWX25", - "ARPHRD_IEEE1394", - "ARPHRD_IEEE802", - "ARPHRD_IEEE80211", - "ARPHRD_IEEE80211_PRISM", - "ARPHRD_IEEE80211_RADIOTAP", - "ARPHRD_IEEE802154", - "ARPHRD_IEEE802154_PHY", - "ARPHRD_IEEE802_TR", - "ARPHRD_INFINIBAND", - "ARPHRD_IPDDP", - "ARPHRD_IPGRE", - "ARPHRD_IRDA", - "ARPHRD_LAPB", - "ARPHRD_LOCALTLK", - "ARPHRD_LOOPBACK", - "ARPHRD_METRICOM", - "ARPHRD_NETROM", - "ARPHRD_NONE", - "ARPHRD_PIMREG", - "ARPHRD_PPP", - "ARPHRD_PRONET", - "ARPHRD_RAWHDLC", - "ARPHRD_ROSE", - "ARPHRD_RSRVD", - "ARPHRD_SIT", - "ARPHRD_SKIP", - "ARPHRD_SLIP", - "ARPHRD_SLIP6", - "ARPHRD_STRIP", - "ARPHRD_TUNNEL", - "ARPHRD_TUNNEL6", - "ARPHRD_VOID", - "ARPHRD_X25", - "AUTHTYPE_CLIENT", - "AUTHTYPE_SERVER", - "Accept", - "Accept4", - "AcceptEx", - "Access", - "Acct", - "AddrinfoW", - "Adjtime", - "Adjtimex", - "AllThreadsSyscall", - "AllThreadsSyscall6", - "AttachLsf", - "B0", - "B1000000", - "B110", - "B115200", - "B1152000", - "B1200", - "B134", - "B14400", - "B150", - "B1500000", - "B1800", - "B19200", - "B200", - "B2000000", - "B230400", - "B2400", - "B2500000", - "B28800", - "B300", - "B3000000", - "B3500000", - "B38400", - "B4000000", - "B460800", - "B4800", - "B50", - "B500000", - "B57600", - "B576000", - "B600", - "B7200", - "B75", - "B76800", - "B921600", - "B9600", - "BASE_PROTOCOL", - "BIOCFEEDBACK", - "BIOCFLUSH", - "BIOCGBLEN", - "BIOCGDIRECTION", - "BIOCGDIRFILT", - "BIOCGDLT", - "BIOCGDLTLIST", - "BIOCGETBUFMODE", - "BIOCGETIF", - "BIOCGETZMAX", - "BIOCGFEEDBACK", - "BIOCGFILDROP", - "BIOCGHDRCMPLT", - "BIOCGRSIG", - "BIOCGRTIMEOUT", - "BIOCGSEESENT", - "BIOCGSTATS", - "BIOCGSTATSOLD", - "BIOCGTSTAMP", - "BIOCIMMEDIATE", - "BIOCLOCK", - "BIOCPROMISC", - "BIOCROTZBUF", - "BIOCSBLEN", - "BIOCSDIRECTION", - "BIOCSDIRFILT", - "BIOCSDLT", - "BIOCSETBUFMODE", - "BIOCSETF", - "BIOCSETFNR", - "BIOCSETIF", - "BIOCSETWF", - "BIOCSETZBUF", - "BIOCSFEEDBACK", - "BIOCSFILDROP", - "BIOCSHDRCMPLT", - "BIOCSRSIG", - "BIOCSRTIMEOUT", - "BIOCSSEESENT", - "BIOCSTCPF", - "BIOCSTSTAMP", - "BIOCSUDPF", - "BIOCVERSION", - "BPF_A", - "BPF_ABS", - "BPF_ADD", - "BPF_ALIGNMENT", - "BPF_ALIGNMENT32", - "BPF_ALU", - "BPF_AND", - "BPF_B", - "BPF_BUFMODE_BUFFER", - "BPF_BUFMODE_ZBUF", - "BPF_DFLTBUFSIZE", - "BPF_DIRECTION_IN", - "BPF_DIRECTION_OUT", - "BPF_DIV", - "BPF_H", - "BPF_IMM", - "BPF_IND", - "BPF_JA", - "BPF_JEQ", - "BPF_JGE", - "BPF_JGT", - "BPF_JMP", - "BPF_JSET", - "BPF_K", - "BPF_LD", - "BPF_LDX", - "BPF_LEN", - "BPF_LSH", - "BPF_MAJOR_VERSION", - "BPF_MAXBUFSIZE", - "BPF_MAXINSNS", - "BPF_MEM", - "BPF_MEMWORDS", - "BPF_MINBUFSIZE", - "BPF_MINOR_VERSION", - "BPF_MISC", - "BPF_MSH", - "BPF_MUL", - "BPF_NEG", - "BPF_OR", - "BPF_RELEASE", - "BPF_RET", - "BPF_RSH", - "BPF_ST", - "BPF_STX", - 
"BPF_SUB", - "BPF_TAX", - "BPF_TXA", - "BPF_T_BINTIME", - "BPF_T_BINTIME_FAST", - "BPF_T_BINTIME_MONOTONIC", - "BPF_T_BINTIME_MONOTONIC_FAST", - "BPF_T_FAST", - "BPF_T_FLAG_MASK", - "BPF_T_FORMAT_MASK", - "BPF_T_MICROTIME", - "BPF_T_MICROTIME_FAST", - "BPF_T_MICROTIME_MONOTONIC", - "BPF_T_MICROTIME_MONOTONIC_FAST", - "BPF_T_MONOTONIC", - "BPF_T_MONOTONIC_FAST", - "BPF_T_NANOTIME", - "BPF_T_NANOTIME_FAST", - "BPF_T_NANOTIME_MONOTONIC", - "BPF_T_NANOTIME_MONOTONIC_FAST", - "BPF_T_NONE", - "BPF_T_NORMAL", - "BPF_W", - "BPF_X", - "BRKINT", - "Bind", - "BindToDevice", - "BpfBuflen", - "BpfDatalink", - "BpfHdr", - "BpfHeadercmpl", - "BpfInsn", - "BpfInterface", - "BpfJump", - "BpfProgram", - "BpfStat", - "BpfStats", - "BpfStmt", - "BpfTimeout", - "BpfTimeval", - "BpfVersion", - "BpfZbuf", - "BpfZbufHeader", - "ByHandleFileInformation", - "BytePtrFromString", - "ByteSliceFromString", - "CCR0_FLUSH", - "CERT_CHAIN_POLICY_AUTHENTICODE", - "CERT_CHAIN_POLICY_AUTHENTICODE_TS", - "CERT_CHAIN_POLICY_BASE", - "CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", - "CERT_CHAIN_POLICY_EV", - "CERT_CHAIN_POLICY_MICROSOFT_ROOT", - "CERT_CHAIN_POLICY_NT_AUTH", - "CERT_CHAIN_POLICY_SSL", - "CERT_E_CN_NO_MATCH", - "CERT_E_EXPIRED", - "CERT_E_PURPOSE", - "CERT_E_ROLE", - "CERT_E_UNTRUSTEDROOT", - "CERT_STORE_ADD_ALWAYS", - "CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", - "CERT_STORE_PROV_MEMORY", - "CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", - "CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", - "CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", - "CERT_TRUST_INVALID_BASIC_CONSTRAINTS", - "CERT_TRUST_INVALID_EXTENSION", - "CERT_TRUST_INVALID_NAME_CONSTRAINTS", - "CERT_TRUST_INVALID_POLICY_CONSTRAINTS", - "CERT_TRUST_IS_CYCLIC", - "CERT_TRUST_IS_EXPLICIT_DISTRUST", - "CERT_TRUST_IS_NOT_SIGNATURE_VALID", - "CERT_TRUST_IS_NOT_TIME_VALID", - "CERT_TRUST_IS_NOT_VALID_FOR_USAGE", - "CERT_TRUST_IS_OFFLINE_REVOCATION", - "CERT_TRUST_IS_REVOKED", - "CERT_TRUST_IS_UNTRUSTED_ROOT", - "CERT_TRUST_NO_ERROR", - "CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", - "CERT_TRUST_REVOCATION_STATUS_UNKNOWN", - "CFLUSH", - "CLOCAL", - "CLONE_CHILD_CLEARTID", - "CLONE_CHILD_SETTID", - "CLONE_CLEAR_SIGHAND", - "CLONE_CSIGNAL", - "CLONE_DETACHED", - "CLONE_FILES", - "CLONE_FS", - "CLONE_INTO_CGROUP", - "CLONE_IO", - "CLONE_NEWCGROUP", - "CLONE_NEWIPC", - "CLONE_NEWNET", - "CLONE_NEWNS", - "CLONE_NEWPID", - "CLONE_NEWTIME", - "CLONE_NEWUSER", - "CLONE_NEWUTS", - "CLONE_PARENT", - "CLONE_PARENT_SETTID", - "CLONE_PID", - "CLONE_PIDFD", - "CLONE_PTRACE", - "CLONE_SETTLS", - "CLONE_SIGHAND", - "CLONE_SYSVSEM", - "CLONE_THREAD", - "CLONE_UNTRACED", - "CLONE_VFORK", - "CLONE_VM", - "CPUID_CFLUSH", - "CREAD", - "CREATE_ALWAYS", - "CREATE_NEW", - "CREATE_NEW_PROCESS_GROUP", - "CREATE_UNICODE_ENVIRONMENT", - "CRYPT_DEFAULT_CONTAINER_OPTIONAL", - "CRYPT_DELETEKEYSET", - "CRYPT_MACHINE_KEYSET", - "CRYPT_NEWKEYSET", - "CRYPT_SILENT", - "CRYPT_VERIFYCONTEXT", - "CS5", - "CS6", - "CS7", - "CS8", - "CSIZE", - "CSTART", - "CSTATUS", - "CSTOP", - "CSTOPB", - "CSUSP", - "CTL_MAXNAME", - "CTL_NET", - "CTL_QUERY", - "CTRL_BREAK_EVENT", - "CTRL_CLOSE_EVENT", - "CTRL_C_EVENT", - "CTRL_LOGOFF_EVENT", - "CTRL_SHUTDOWN_EVENT", - "CancelIo", - "CancelIoEx", - "CertAddCertificateContextToStore", - "CertChainContext", - "CertChainElement", - "CertChainPara", - "CertChainPolicyPara", - "CertChainPolicyStatus", - "CertCloseStore", - "CertContext", - "CertCreateCertificateContext", - "CertEnhKeyUsage", - 
"CertEnumCertificatesInStore", - "CertFreeCertificateChain", - "CertFreeCertificateContext", - "CertGetCertificateChain", - "CertInfo", - "CertOpenStore", - "CertOpenSystemStore", - "CertRevocationCrlInfo", - "CertRevocationInfo", - "CertSimpleChain", - "CertTrustListInfo", - "CertTrustStatus", - "CertUsageMatch", - "CertVerifyCertificateChainPolicy", - "Chdir", - "CheckBpfVersion", - "Chflags", - "Chmod", - "Chown", - "Chroot", - "Clearenv", - "Close", - "CloseHandle", - "CloseOnExec", - "Closesocket", - "CmsgLen", - "CmsgSpace", - "Cmsghdr", - "CommandLineToArgv", - "ComputerName", - "Conn", - "Connect", - "ConnectEx", - "ConvertSidToStringSid", - "ConvertStringSidToSid", - "CopySid", - "Creat", - "CreateDirectory", - "CreateFile", - "CreateFileMapping", - "CreateHardLink", - "CreateIoCompletionPort", - "CreatePipe", - "CreateProcess", - "CreateProcessAsUser", - "CreateSymbolicLink", - "CreateToolhelp32Snapshot", - "Credential", - "CryptAcquireContext", - "CryptGenRandom", - "CryptReleaseContext", - "DIOCBSFLUSH", - "DIOCOSFPFLUSH", - "DLL", - "DLLError", - "DLT_A429", - "DLT_A653_ICM", - "DLT_AIRONET_HEADER", - "DLT_AOS", - "DLT_APPLE_IP_OVER_IEEE1394", - "DLT_ARCNET", - "DLT_ARCNET_LINUX", - "DLT_ATM_CLIP", - "DLT_ATM_RFC1483", - "DLT_AURORA", - "DLT_AX25", - "DLT_AX25_KISS", - "DLT_BACNET_MS_TP", - "DLT_BLUETOOTH_HCI_H4", - "DLT_BLUETOOTH_HCI_H4_WITH_PHDR", - "DLT_CAN20B", - "DLT_CAN_SOCKETCAN", - "DLT_CHAOS", - "DLT_CHDLC", - "DLT_CISCO_IOS", - "DLT_C_HDLC", - "DLT_C_HDLC_WITH_DIR", - "DLT_DBUS", - "DLT_DECT", - "DLT_DOCSIS", - "DLT_DVB_CI", - "DLT_ECONET", - "DLT_EN10MB", - "DLT_EN3MB", - "DLT_ENC", - "DLT_ERF", - "DLT_ERF_ETH", - "DLT_ERF_POS", - "DLT_FC_2", - "DLT_FC_2_WITH_FRAME_DELIMS", - "DLT_FDDI", - "DLT_FLEXRAY", - "DLT_FRELAY", - "DLT_FRELAY_WITH_DIR", - "DLT_GCOM_SERIAL", - "DLT_GCOM_T1E1", - "DLT_GPF_F", - "DLT_GPF_T", - "DLT_GPRS_LLC", - "DLT_GSMTAP_ABIS", - "DLT_GSMTAP_UM", - "DLT_HDLC", - "DLT_HHDLC", - "DLT_HIPPI", - "DLT_IBM_SN", - "DLT_IBM_SP", - "DLT_IEEE802", - "DLT_IEEE802_11", - "DLT_IEEE802_11_RADIO", - "DLT_IEEE802_11_RADIO_AVS", - "DLT_IEEE802_15_4", - "DLT_IEEE802_15_4_LINUX", - "DLT_IEEE802_15_4_NOFCS", - "DLT_IEEE802_15_4_NONASK_PHY", - "DLT_IEEE802_16_MAC_CPS", - "DLT_IEEE802_16_MAC_CPS_RADIO", - "DLT_IPFILTER", - "DLT_IPMB", - "DLT_IPMB_LINUX", - "DLT_IPNET", - "DLT_IPOIB", - "DLT_IPV4", - "DLT_IPV6", - "DLT_IP_OVER_FC", - "DLT_JUNIPER_ATM1", - "DLT_JUNIPER_ATM2", - "DLT_JUNIPER_ATM_CEMIC", - "DLT_JUNIPER_CHDLC", - "DLT_JUNIPER_ES", - "DLT_JUNIPER_ETHER", - "DLT_JUNIPER_FIBRECHANNEL", - "DLT_JUNIPER_FRELAY", - "DLT_JUNIPER_GGSN", - "DLT_JUNIPER_ISM", - "DLT_JUNIPER_MFR", - "DLT_JUNIPER_MLFR", - "DLT_JUNIPER_MLPPP", - "DLT_JUNIPER_MONITOR", - "DLT_JUNIPER_PIC_PEER", - "DLT_JUNIPER_PPP", - "DLT_JUNIPER_PPPOE", - "DLT_JUNIPER_PPPOE_ATM", - "DLT_JUNIPER_SERVICES", - "DLT_JUNIPER_SRX_E2E", - "DLT_JUNIPER_ST", - "DLT_JUNIPER_VP", - "DLT_JUNIPER_VS", - "DLT_LAPB_WITH_DIR", - "DLT_LAPD", - "DLT_LIN", - "DLT_LINUX_EVDEV", - "DLT_LINUX_IRDA", - "DLT_LINUX_LAPD", - "DLT_LINUX_PPP_WITHDIRECTION", - "DLT_LINUX_SLL", - "DLT_LOOP", - "DLT_LTALK", - "DLT_MATCHING_MAX", - "DLT_MATCHING_MIN", - "DLT_MFR", - "DLT_MOST", - "DLT_MPEG_2_TS", - "DLT_MPLS", - "DLT_MTP2", - "DLT_MTP2_WITH_PHDR", - "DLT_MTP3", - "DLT_MUX27010", - "DLT_NETANALYZER", - "DLT_NETANALYZER_TRANSPARENT", - "DLT_NFC_LLCP", - "DLT_NFLOG", - "DLT_NG40", - "DLT_NULL", - "DLT_PCI_EXP", - "DLT_PFLOG", - "DLT_PFSYNC", - "DLT_PPI", - "DLT_PPP", - "DLT_PPP_BSDOS", - "DLT_PPP_ETHER", - "DLT_PPP_PPPD", - 
"DLT_PPP_SERIAL", - "DLT_PPP_WITH_DIR", - "DLT_PPP_WITH_DIRECTION", - "DLT_PRISM_HEADER", - "DLT_PRONET", - "DLT_RAIF1", - "DLT_RAW", - "DLT_RAWAF_MASK", - "DLT_RIO", - "DLT_SCCP", - "DLT_SITA", - "DLT_SLIP", - "DLT_SLIP_BSDOS", - "DLT_STANAG_5066_D_PDU", - "DLT_SUNATM", - "DLT_SYMANTEC_FIREWALL", - "DLT_TZSP", - "DLT_USB", - "DLT_USB_LINUX", - "DLT_USB_LINUX_MMAPPED", - "DLT_USER0", - "DLT_USER1", - "DLT_USER10", - "DLT_USER11", - "DLT_USER12", - "DLT_USER13", - "DLT_USER14", - "DLT_USER15", - "DLT_USER2", - "DLT_USER3", - "DLT_USER4", - "DLT_USER5", - "DLT_USER6", - "DLT_USER7", - "DLT_USER8", - "DLT_USER9", - "DLT_WIHART", - "DLT_X2E_SERIAL", - "DLT_X2E_XORAYA", - "DNSMXData", - "DNSPTRData", - "DNSRecord", - "DNSSRVData", - "DNSTXTData", - "DNS_INFO_NO_RECORDS", - "DNS_TYPE_A", - "DNS_TYPE_A6", - "DNS_TYPE_AAAA", - "DNS_TYPE_ADDRS", - "DNS_TYPE_AFSDB", - "DNS_TYPE_ALL", - "DNS_TYPE_ANY", - "DNS_TYPE_ATMA", - "DNS_TYPE_AXFR", - "DNS_TYPE_CERT", - "DNS_TYPE_CNAME", - "DNS_TYPE_DHCID", - "DNS_TYPE_DNAME", - "DNS_TYPE_DNSKEY", - "DNS_TYPE_DS", - "DNS_TYPE_EID", - "DNS_TYPE_GID", - "DNS_TYPE_GPOS", - "DNS_TYPE_HINFO", - "DNS_TYPE_ISDN", - "DNS_TYPE_IXFR", - "DNS_TYPE_KEY", - "DNS_TYPE_KX", - "DNS_TYPE_LOC", - "DNS_TYPE_MAILA", - "DNS_TYPE_MAILB", - "DNS_TYPE_MB", - "DNS_TYPE_MD", - "DNS_TYPE_MF", - "DNS_TYPE_MG", - "DNS_TYPE_MINFO", - "DNS_TYPE_MR", - "DNS_TYPE_MX", - "DNS_TYPE_NAPTR", - "DNS_TYPE_NBSTAT", - "DNS_TYPE_NIMLOC", - "DNS_TYPE_NS", - "DNS_TYPE_NSAP", - "DNS_TYPE_NSAPPTR", - "DNS_TYPE_NSEC", - "DNS_TYPE_NULL", - "DNS_TYPE_NXT", - "DNS_TYPE_OPT", - "DNS_TYPE_PTR", - "DNS_TYPE_PX", - "DNS_TYPE_RP", - "DNS_TYPE_RRSIG", - "DNS_TYPE_RT", - "DNS_TYPE_SIG", - "DNS_TYPE_SINK", - "DNS_TYPE_SOA", - "DNS_TYPE_SRV", - "DNS_TYPE_TEXT", - "DNS_TYPE_TKEY", - "DNS_TYPE_TSIG", - "DNS_TYPE_UID", - "DNS_TYPE_UINFO", - "DNS_TYPE_UNSPEC", - "DNS_TYPE_WINS", - "DNS_TYPE_WINSR", - "DNS_TYPE_WKS", - "DNS_TYPE_X25", - "DT_BLK", - "DT_CHR", - "DT_DIR", - "DT_FIFO", - "DT_LNK", - "DT_REG", - "DT_SOCK", - "DT_UNKNOWN", - "DT_WHT", - "DUPLICATE_CLOSE_SOURCE", - "DUPLICATE_SAME_ACCESS", - "DeleteFile", - "DetachLsf", - "DeviceIoControl", - "Dirent", - "DnsNameCompare", - "DnsQuery", - "DnsRecordListFree", - "DnsSectionAdditional", - "DnsSectionAnswer", - "DnsSectionAuthority", - "DnsSectionQuestion", - "Dup", - "Dup2", - "Dup3", - "DuplicateHandle", - "E2BIG", - "EACCES", - "EADDRINUSE", - "EADDRNOTAVAIL", - "EADV", - "EAFNOSUPPORT", - "EAGAIN", - "EALREADY", - "EAUTH", - "EBADARCH", - "EBADE", - "EBADEXEC", - "EBADF", - "EBADFD", - "EBADMACHO", - "EBADMSG", - "EBADR", - "EBADRPC", - "EBADRQC", - "EBADSLT", - "EBFONT", - "EBUSY", - "ECANCELED", - "ECAPMODE", - "ECHILD", - "ECHO", - "ECHOCTL", - "ECHOE", - "ECHOK", - "ECHOKE", - "ECHONL", - "ECHOPRT", - "ECHRNG", - "ECOMM", - "ECONNABORTED", - "ECONNREFUSED", - "ECONNRESET", - "EDEADLK", - "EDEADLOCK", - "EDESTADDRREQ", - "EDEVERR", - "EDOM", - "EDOOFUS", - "EDOTDOT", - "EDQUOT", - "EEXIST", - "EFAULT", - "EFBIG", - "EFER_LMA", - "EFER_LME", - "EFER_NXE", - "EFER_SCE", - "EFTYPE", - "EHOSTDOWN", - "EHOSTUNREACH", - "EHWPOISON", - "EIDRM", - "EILSEQ", - "EINPROGRESS", - "EINTR", - "EINVAL", - "EIO", - "EIPSEC", - "EISCONN", - "EISDIR", - "EISNAM", - "EKEYEXPIRED", - "EKEYREJECTED", - "EKEYREVOKED", - "EL2HLT", - "EL2NSYNC", - "EL3HLT", - "EL3RST", - "ELAST", - "ELF_NGREG", - "ELF_PRARGSZ", - "ELIBACC", - "ELIBBAD", - "ELIBEXEC", - "ELIBMAX", - "ELIBSCN", - "ELNRNG", - "ELOOP", - "EMEDIUMTYPE", - "EMFILE", - "EMLINK", - "EMSGSIZE", - "EMT_TAGOVF", - "EMULTIHOP", 
- "EMUL_ENABLED", - "EMUL_LINUX", - "EMUL_LINUX32", - "EMUL_MAXID", - "EMUL_NATIVE", - "ENAMETOOLONG", - "ENAVAIL", - "ENDRUNDISC", - "ENEEDAUTH", - "ENETDOWN", - "ENETRESET", - "ENETUNREACH", - "ENFILE", - "ENOANO", - "ENOATTR", - "ENOBUFS", - "ENOCSI", - "ENODATA", - "ENODEV", - "ENOENT", - "ENOEXEC", - "ENOKEY", - "ENOLCK", - "ENOLINK", - "ENOMEDIUM", - "ENOMEM", - "ENOMSG", - "ENONET", - "ENOPKG", - "ENOPOLICY", - "ENOPROTOOPT", - "ENOSPC", - "ENOSR", - "ENOSTR", - "ENOSYS", - "ENOTBLK", - "ENOTCAPABLE", - "ENOTCONN", - "ENOTDIR", - "ENOTEMPTY", - "ENOTNAM", - "ENOTRECOVERABLE", - "ENOTSOCK", - "ENOTSUP", - "ENOTTY", - "ENOTUNIQ", - "ENXIO", - "EN_SW_CTL_INF", - "EN_SW_CTL_PREC", - "EN_SW_CTL_ROUND", - "EN_SW_DATACHAIN", - "EN_SW_DENORM", - "EN_SW_INVOP", - "EN_SW_OVERFLOW", - "EN_SW_PRECLOSS", - "EN_SW_UNDERFLOW", - "EN_SW_ZERODIV", - "EOPNOTSUPP", - "EOVERFLOW", - "EOWNERDEAD", - "EPERM", - "EPFNOSUPPORT", - "EPIPE", - "EPOLLERR", - "EPOLLET", - "EPOLLHUP", - "EPOLLIN", - "EPOLLMSG", - "EPOLLONESHOT", - "EPOLLOUT", - "EPOLLPRI", - "EPOLLRDBAND", - "EPOLLRDHUP", - "EPOLLRDNORM", - "EPOLLWRBAND", - "EPOLLWRNORM", - "EPOLL_CLOEXEC", - "EPOLL_CTL_ADD", - "EPOLL_CTL_DEL", - "EPOLL_CTL_MOD", - "EPOLL_NONBLOCK", - "EPROCLIM", - "EPROCUNAVAIL", - "EPROGMISMATCH", - "EPROGUNAVAIL", - "EPROTO", - "EPROTONOSUPPORT", - "EPROTOTYPE", - "EPWROFF", - "EQFULL", - "ERANGE", - "EREMCHG", - "EREMOTE", - "EREMOTEIO", - "ERESTART", - "ERFKILL", - "EROFS", - "ERPCMISMATCH", - "ERROR_ACCESS_DENIED", - "ERROR_ALREADY_EXISTS", - "ERROR_BROKEN_PIPE", - "ERROR_BUFFER_OVERFLOW", - "ERROR_DIR_NOT_EMPTY", - "ERROR_ENVVAR_NOT_FOUND", - "ERROR_FILE_EXISTS", - "ERROR_FILE_NOT_FOUND", - "ERROR_HANDLE_EOF", - "ERROR_INSUFFICIENT_BUFFER", - "ERROR_IO_PENDING", - "ERROR_MOD_NOT_FOUND", - "ERROR_MORE_DATA", - "ERROR_NETNAME_DELETED", - "ERROR_NOT_FOUND", - "ERROR_NO_MORE_FILES", - "ERROR_OPERATION_ABORTED", - "ERROR_PATH_NOT_FOUND", - "ERROR_PRIVILEGE_NOT_HELD", - "ERROR_PROC_NOT_FOUND", - "ESHLIBVERS", - "ESHUTDOWN", - "ESOCKTNOSUPPORT", - "ESPIPE", - "ESRCH", - "ESRMNT", - "ESTALE", - "ESTRPIPE", - "ETHERCAP_JUMBO_MTU", - "ETHERCAP_VLAN_HWTAGGING", - "ETHERCAP_VLAN_MTU", - "ETHERMIN", - "ETHERMTU", - "ETHERMTU_JUMBO", - "ETHERTYPE_8023", - "ETHERTYPE_AARP", - "ETHERTYPE_ACCTON", - "ETHERTYPE_AEONIC", - "ETHERTYPE_ALPHA", - "ETHERTYPE_AMBER", - "ETHERTYPE_AMOEBA", - "ETHERTYPE_AOE", - "ETHERTYPE_APOLLO", - "ETHERTYPE_APOLLODOMAIN", - "ETHERTYPE_APPLETALK", - "ETHERTYPE_APPLITEK", - "ETHERTYPE_ARGONAUT", - "ETHERTYPE_ARP", - "ETHERTYPE_AT", - "ETHERTYPE_ATALK", - "ETHERTYPE_ATOMIC", - "ETHERTYPE_ATT", - "ETHERTYPE_ATTSTANFORD", - "ETHERTYPE_AUTOPHON", - "ETHERTYPE_AXIS", - "ETHERTYPE_BCLOOP", - "ETHERTYPE_BOFL", - "ETHERTYPE_CABLETRON", - "ETHERTYPE_CHAOS", - "ETHERTYPE_COMDESIGN", - "ETHERTYPE_COMPUGRAPHIC", - "ETHERTYPE_COUNTERPOINT", - "ETHERTYPE_CRONUS", - "ETHERTYPE_CRONUSVLN", - "ETHERTYPE_DCA", - "ETHERTYPE_DDE", - "ETHERTYPE_DEBNI", - "ETHERTYPE_DECAM", - "ETHERTYPE_DECCUST", - "ETHERTYPE_DECDIAG", - "ETHERTYPE_DECDNS", - "ETHERTYPE_DECDTS", - "ETHERTYPE_DECEXPER", - "ETHERTYPE_DECLAST", - "ETHERTYPE_DECLTM", - "ETHERTYPE_DECMUMPS", - "ETHERTYPE_DECNETBIOS", - "ETHERTYPE_DELTACON", - "ETHERTYPE_DIDDLE", - "ETHERTYPE_DLOG1", - "ETHERTYPE_DLOG2", - "ETHERTYPE_DN", - "ETHERTYPE_DOGFIGHT", - "ETHERTYPE_DSMD", - "ETHERTYPE_ECMA", - "ETHERTYPE_ENCRYPT", - "ETHERTYPE_ES", - "ETHERTYPE_EXCELAN", - "ETHERTYPE_EXPERDATA", - "ETHERTYPE_FLIP", - "ETHERTYPE_FLOWCONTROL", - "ETHERTYPE_FRARP", - "ETHERTYPE_GENDYN", - 
"ETHERTYPE_HAYES", - "ETHERTYPE_HIPPI_FP", - "ETHERTYPE_HITACHI", - "ETHERTYPE_HP", - "ETHERTYPE_IEEEPUP", - "ETHERTYPE_IEEEPUPAT", - "ETHERTYPE_IMLBL", - "ETHERTYPE_IMLBLDIAG", - "ETHERTYPE_IP", - "ETHERTYPE_IPAS", - "ETHERTYPE_IPV6", - "ETHERTYPE_IPX", - "ETHERTYPE_IPXNEW", - "ETHERTYPE_KALPANA", - "ETHERTYPE_LANBRIDGE", - "ETHERTYPE_LANPROBE", - "ETHERTYPE_LAT", - "ETHERTYPE_LBACK", - "ETHERTYPE_LITTLE", - "ETHERTYPE_LLDP", - "ETHERTYPE_LOGICRAFT", - "ETHERTYPE_LOOPBACK", - "ETHERTYPE_MATRA", - "ETHERTYPE_MAX", - "ETHERTYPE_MERIT", - "ETHERTYPE_MICP", - "ETHERTYPE_MOPDL", - "ETHERTYPE_MOPRC", - "ETHERTYPE_MOTOROLA", - "ETHERTYPE_MPLS", - "ETHERTYPE_MPLS_MCAST", - "ETHERTYPE_MUMPS", - "ETHERTYPE_NBPCC", - "ETHERTYPE_NBPCLAIM", - "ETHERTYPE_NBPCLREQ", - "ETHERTYPE_NBPCLRSP", - "ETHERTYPE_NBPCREQ", - "ETHERTYPE_NBPCRSP", - "ETHERTYPE_NBPDG", - "ETHERTYPE_NBPDGB", - "ETHERTYPE_NBPDLTE", - "ETHERTYPE_NBPRAR", - "ETHERTYPE_NBPRAS", - "ETHERTYPE_NBPRST", - "ETHERTYPE_NBPSCD", - "ETHERTYPE_NBPVCD", - "ETHERTYPE_NBS", - "ETHERTYPE_NCD", - "ETHERTYPE_NESTAR", - "ETHERTYPE_NETBEUI", - "ETHERTYPE_NOVELL", - "ETHERTYPE_NS", - "ETHERTYPE_NSAT", - "ETHERTYPE_NSCOMPAT", - "ETHERTYPE_NTRAILER", - "ETHERTYPE_OS9", - "ETHERTYPE_OS9NET", - "ETHERTYPE_PACER", - "ETHERTYPE_PAE", - "ETHERTYPE_PCS", - "ETHERTYPE_PLANNING", - "ETHERTYPE_PPP", - "ETHERTYPE_PPPOE", - "ETHERTYPE_PPPOEDISC", - "ETHERTYPE_PRIMENTS", - "ETHERTYPE_PUP", - "ETHERTYPE_PUPAT", - "ETHERTYPE_QINQ", - "ETHERTYPE_RACAL", - "ETHERTYPE_RATIONAL", - "ETHERTYPE_RAWFR", - "ETHERTYPE_RCL", - "ETHERTYPE_RDP", - "ETHERTYPE_RETIX", - "ETHERTYPE_REVARP", - "ETHERTYPE_SCA", - "ETHERTYPE_SECTRA", - "ETHERTYPE_SECUREDATA", - "ETHERTYPE_SGITW", - "ETHERTYPE_SG_BOUNCE", - "ETHERTYPE_SG_DIAG", - "ETHERTYPE_SG_NETGAMES", - "ETHERTYPE_SG_RESV", - "ETHERTYPE_SIMNET", - "ETHERTYPE_SLOW", - "ETHERTYPE_SLOWPROTOCOLS", - "ETHERTYPE_SNA", - "ETHERTYPE_SNMP", - "ETHERTYPE_SONIX", - "ETHERTYPE_SPIDER", - "ETHERTYPE_SPRITE", - "ETHERTYPE_STP", - "ETHERTYPE_TALARIS", - "ETHERTYPE_TALARISMC", - "ETHERTYPE_TCPCOMP", - "ETHERTYPE_TCPSM", - "ETHERTYPE_TEC", - "ETHERTYPE_TIGAN", - "ETHERTYPE_TRAIL", - "ETHERTYPE_TRANSETHER", - "ETHERTYPE_TYMSHARE", - "ETHERTYPE_UBBST", - "ETHERTYPE_UBDEBUG", - "ETHERTYPE_UBDIAGLOOP", - "ETHERTYPE_UBDL", - "ETHERTYPE_UBNIU", - "ETHERTYPE_UBNMC", - "ETHERTYPE_VALID", - "ETHERTYPE_VARIAN", - "ETHERTYPE_VAXELN", - "ETHERTYPE_VEECO", - "ETHERTYPE_VEXP", - "ETHERTYPE_VGLAB", - "ETHERTYPE_VINES", - "ETHERTYPE_VINESECHO", - "ETHERTYPE_VINESLOOP", - "ETHERTYPE_VITAL", - "ETHERTYPE_VLAN", - "ETHERTYPE_VLTLMAN", - "ETHERTYPE_VPROD", - "ETHERTYPE_VURESERVED", - "ETHERTYPE_WATERLOO", - "ETHERTYPE_WELLFLEET", - "ETHERTYPE_X25", - "ETHERTYPE_X75", - "ETHERTYPE_XNSSM", - "ETHERTYPE_XTP", - "ETHER_ADDR_LEN", - "ETHER_ALIGN", - "ETHER_CRC_LEN", - "ETHER_CRC_POLY_BE", - "ETHER_CRC_POLY_LE", - "ETHER_HDR_LEN", - "ETHER_MAX_DIX_LEN", - "ETHER_MAX_LEN", - "ETHER_MAX_LEN_JUMBO", - "ETHER_MIN_LEN", - "ETHER_PPPOE_ENCAP_LEN", - "ETHER_TYPE_LEN", - "ETHER_VLAN_ENCAP_LEN", - "ETH_P_1588", - "ETH_P_8021Q", - "ETH_P_802_2", - "ETH_P_802_3", - "ETH_P_AARP", - "ETH_P_ALL", - "ETH_P_AOE", - "ETH_P_ARCNET", - "ETH_P_ARP", - "ETH_P_ATALK", - "ETH_P_ATMFATE", - "ETH_P_ATMMPOA", - "ETH_P_AX25", - "ETH_P_BPQ", - "ETH_P_CAIF", - "ETH_P_CAN", - "ETH_P_CONTROL", - "ETH_P_CUST", - "ETH_P_DDCMP", - "ETH_P_DEC", - "ETH_P_DIAG", - "ETH_P_DNA_DL", - "ETH_P_DNA_RC", - "ETH_P_DNA_RT", - "ETH_P_DSA", - "ETH_P_ECONET", - "ETH_P_EDSA", - "ETH_P_FCOE", - "ETH_P_FIP", - "ETH_P_HDLC", - 
"ETH_P_IEEE802154", - "ETH_P_IEEEPUP", - "ETH_P_IEEEPUPAT", - "ETH_P_IP", - "ETH_P_IPV6", - "ETH_P_IPX", - "ETH_P_IRDA", - "ETH_P_LAT", - "ETH_P_LINK_CTL", - "ETH_P_LOCALTALK", - "ETH_P_LOOP", - "ETH_P_MOBITEX", - "ETH_P_MPLS_MC", - "ETH_P_MPLS_UC", - "ETH_P_PAE", - "ETH_P_PAUSE", - "ETH_P_PHONET", - "ETH_P_PPPTALK", - "ETH_P_PPP_DISC", - "ETH_P_PPP_MP", - "ETH_P_PPP_SES", - "ETH_P_PUP", - "ETH_P_PUPAT", - "ETH_P_RARP", - "ETH_P_SCA", - "ETH_P_SLOW", - "ETH_P_SNAP", - "ETH_P_TEB", - "ETH_P_TIPC", - "ETH_P_TRAILER", - "ETH_P_TR_802_2", - "ETH_P_WAN_PPP", - "ETH_P_WCCP", - "ETH_P_X25", - "ETIME", - "ETIMEDOUT", - "ETOOMANYREFS", - "ETXTBSY", - "EUCLEAN", - "EUNATCH", - "EUSERS", - "EVFILT_AIO", - "EVFILT_FS", - "EVFILT_LIO", - "EVFILT_MACHPORT", - "EVFILT_PROC", - "EVFILT_READ", - "EVFILT_SIGNAL", - "EVFILT_SYSCOUNT", - "EVFILT_THREADMARKER", - "EVFILT_TIMER", - "EVFILT_USER", - "EVFILT_VM", - "EVFILT_VNODE", - "EVFILT_WRITE", - "EV_ADD", - "EV_CLEAR", - "EV_DELETE", - "EV_DISABLE", - "EV_DISPATCH", - "EV_DROP", - "EV_ENABLE", - "EV_EOF", - "EV_ERROR", - "EV_FLAG0", - "EV_FLAG1", - "EV_ONESHOT", - "EV_OOBAND", - "EV_POLL", - "EV_RECEIPT", - "EV_SYSFLAGS", - "EWINDOWS", - "EWOULDBLOCK", - "EXDEV", - "EXFULL", - "EXTA", - "EXTB", - "EXTPROC", - "Environ", - "EpollCreate", - "EpollCreate1", - "EpollCtl", - "EpollEvent", - "EpollWait", - "Errno", - "EscapeArg", - "Exchangedata", - "Exec", - "Exit", - "ExitProcess", - "FD_CLOEXEC", - "FD_SETSIZE", - "FILE_ACTION_ADDED", - "FILE_ACTION_MODIFIED", - "FILE_ACTION_REMOVED", - "FILE_ACTION_RENAMED_NEW_NAME", - "FILE_ACTION_RENAMED_OLD_NAME", - "FILE_APPEND_DATA", - "FILE_ATTRIBUTE_ARCHIVE", - "FILE_ATTRIBUTE_DIRECTORY", - "FILE_ATTRIBUTE_HIDDEN", - "FILE_ATTRIBUTE_NORMAL", - "FILE_ATTRIBUTE_READONLY", - "FILE_ATTRIBUTE_REPARSE_POINT", - "FILE_ATTRIBUTE_SYSTEM", - "FILE_BEGIN", - "FILE_CURRENT", - "FILE_END", - "FILE_FLAG_BACKUP_SEMANTICS", - "FILE_FLAG_OPEN_REPARSE_POINT", - "FILE_FLAG_OVERLAPPED", - "FILE_LIST_DIRECTORY", - "FILE_MAP_COPY", - "FILE_MAP_EXECUTE", - "FILE_MAP_READ", - "FILE_MAP_WRITE", - "FILE_NOTIFY_CHANGE_ATTRIBUTES", - "FILE_NOTIFY_CHANGE_CREATION", - "FILE_NOTIFY_CHANGE_DIR_NAME", - "FILE_NOTIFY_CHANGE_FILE_NAME", - "FILE_NOTIFY_CHANGE_LAST_ACCESS", - "FILE_NOTIFY_CHANGE_LAST_WRITE", - "FILE_NOTIFY_CHANGE_SIZE", - "FILE_SHARE_DELETE", - "FILE_SHARE_READ", - "FILE_SHARE_WRITE", - "FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", - "FILE_SKIP_SET_EVENT_ON_HANDLE", - "FILE_TYPE_CHAR", - "FILE_TYPE_DISK", - "FILE_TYPE_PIPE", - "FILE_TYPE_REMOTE", - "FILE_TYPE_UNKNOWN", - "FILE_WRITE_ATTRIBUTES", - "FLUSHO", - "FORMAT_MESSAGE_ALLOCATE_BUFFER", - "FORMAT_MESSAGE_ARGUMENT_ARRAY", - "FORMAT_MESSAGE_FROM_HMODULE", - "FORMAT_MESSAGE_FROM_STRING", - "FORMAT_MESSAGE_FROM_SYSTEM", - "FORMAT_MESSAGE_IGNORE_INSERTS", - "FORMAT_MESSAGE_MAX_WIDTH_MASK", - "FSCTL_GET_REPARSE_POINT", - "F_ADDFILESIGS", - "F_ADDSIGS", - "F_ALLOCATEALL", - "F_ALLOCATECONTIG", - "F_CANCEL", - "F_CHKCLEAN", - "F_CLOSEM", - "F_DUP2FD", - "F_DUP2FD_CLOEXEC", - "F_DUPFD", - "F_DUPFD_CLOEXEC", - "F_EXLCK", - "F_FINDSIGS", - "F_FLUSH_DATA", - "F_FREEZE_FS", - "F_FSCTL", - "F_FSDIRMASK", - "F_FSIN", - "F_FSINOUT", - "F_FSOUT", - "F_FSPRIV", - "F_FSVOID", - "F_FULLFSYNC", - "F_GETCODEDIR", - "F_GETFD", - "F_GETFL", - "F_GETLEASE", - "F_GETLK", - "F_GETLK64", - "F_GETLKPID", - "F_GETNOSIGPIPE", - "F_GETOWN", - "F_GETOWN_EX", - "F_GETPATH", - "F_GETPATH_MTMINFO", - "F_GETPIPE_SZ", - "F_GETPROTECTIONCLASS", - "F_GETPROTECTIONLEVEL", - "F_GETSIG", - "F_GLOBAL_NOCACHE", - "F_LOCK", - 
"F_LOG2PHYS", - "F_LOG2PHYS_EXT", - "F_MARKDEPENDENCY", - "F_MAXFD", - "F_NOCACHE", - "F_NODIRECT", - "F_NOTIFY", - "F_OGETLK", - "F_OK", - "F_OSETLK", - "F_OSETLKW", - "F_PARAM_MASK", - "F_PARAM_MAX", - "F_PATHPKG_CHECK", - "F_PEOFPOSMODE", - "F_PREALLOCATE", - "F_RDADVISE", - "F_RDAHEAD", - "F_RDLCK", - "F_READAHEAD", - "F_READBOOTSTRAP", - "F_SETBACKINGSTORE", - "F_SETFD", - "F_SETFL", - "F_SETLEASE", - "F_SETLK", - "F_SETLK64", - "F_SETLKW", - "F_SETLKW64", - "F_SETLKWTIMEOUT", - "F_SETLK_REMOTE", - "F_SETNOSIGPIPE", - "F_SETOWN", - "F_SETOWN_EX", - "F_SETPIPE_SZ", - "F_SETPROTECTIONCLASS", - "F_SETSIG", - "F_SETSIZE", - "F_SHLCK", - "F_SINGLE_WRITER", - "F_TEST", - "F_THAW_FS", - "F_TLOCK", - "F_TRANSCODEKEY", - "F_ULOCK", - "F_UNLCK", - "F_UNLCKSYS", - "F_VOLPOSMODE", - "F_WRITEBOOTSTRAP", - "F_WRLCK", - "Faccessat", - "Fallocate", - "Fbootstraptransfer_t", - "Fchdir", - "Fchflags", - "Fchmod", - "Fchmodat", - "Fchown", - "Fchownat", - "FcntlFlock", - "FdSet", - "Fdatasync", - "FileNotifyInformation", - "Filetime", - "FindClose", - "FindFirstFile", - "FindNextFile", - "Flock", - "Flock_t", - "FlushBpf", - "FlushFileBuffers", - "FlushViewOfFile", - "ForkExec", - "ForkLock", - "FormatMessage", - "Fpathconf", - "FreeAddrInfoW", - "FreeEnvironmentStrings", - "FreeLibrary", - "Fsid", - "Fstat", - "Fstatat", - "Fstatfs", - "Fstore_t", - "Fsync", - "Ftruncate", - "FullPath", - "Futimes", - "Futimesat", - "GENERIC_ALL", - "GENERIC_EXECUTE", - "GENERIC_READ", - "GENERIC_WRITE", - "GUID", - "GetAcceptExSockaddrs", - "GetAdaptersInfo", - "GetAddrInfoW", - "GetCommandLine", - "GetComputerName", - "GetConsoleMode", - "GetCurrentDirectory", - "GetCurrentProcess", - "GetEnvironmentStrings", - "GetEnvironmentVariable", - "GetExitCodeProcess", - "GetFileAttributes", - "GetFileAttributesEx", - "GetFileExInfoStandard", - "GetFileExMaxInfoLevel", - "GetFileInformationByHandle", - "GetFileType", - "GetFullPathName", - "GetHostByName", - "GetIfEntry", - "GetLastError", - "GetLengthSid", - "GetLongPathName", - "GetProcAddress", - "GetProcessTimes", - "GetProtoByName", - "GetQueuedCompletionStatus", - "GetServByName", - "GetShortPathName", - "GetStartupInfo", - "GetStdHandle", - "GetSystemTimeAsFileTime", - "GetTempPath", - "GetTimeZoneInformation", - "GetTokenInformation", - "GetUserNameEx", - "GetUserProfileDirectory", - "GetVersion", - "Getcwd", - "Getdents", - "Getdirentries", - "Getdtablesize", - "Getegid", - "Getenv", - "Geteuid", - "Getfsstat", - "Getgid", - "Getgroups", - "Getpagesize", - "Getpeername", - "Getpgid", - "Getpgrp", - "Getpid", - "Getppid", - "Getpriority", - "Getrlimit", - "Getrusage", - "Getsid", - "Getsockname", - "Getsockopt", - "GetsockoptByte", - "GetsockoptICMPv6Filter", - "GetsockoptIPMreq", - "GetsockoptIPMreqn", - "GetsockoptIPv6MTUInfo", - "GetsockoptIPv6Mreq", - "GetsockoptInet4Addr", - "GetsockoptInt", - "GetsockoptUcred", - "Gettid", - "Gettimeofday", - "Getuid", - "Getwd", - "Getxattr", - "HANDLE_FLAG_INHERIT", - "HKEY_CLASSES_ROOT", - "HKEY_CURRENT_CONFIG", - "HKEY_CURRENT_USER", - "HKEY_DYN_DATA", - "HKEY_LOCAL_MACHINE", - "HKEY_PERFORMANCE_DATA", - "HKEY_USERS", - "HUPCL", - "Handle", - "Hostent", - "ICANON", - "ICMP6_FILTER", - "ICMPV6_FILTER", - "ICMPv6Filter", - "ICRNL", - "IEXTEN", - "IFAN_ARRIVAL", - "IFAN_DEPARTURE", - "IFA_ADDRESS", - "IFA_ANYCAST", - "IFA_BROADCAST", - "IFA_CACHEINFO", - "IFA_F_DADFAILED", - "IFA_F_DEPRECATED", - "IFA_F_HOMEADDRESS", - "IFA_F_NODAD", - "IFA_F_OPTIMISTIC", - "IFA_F_PERMANENT", - "IFA_F_SECONDARY", - "IFA_F_TEMPORARY", - 
"IFA_F_TENTATIVE", - "IFA_LABEL", - "IFA_LOCAL", - "IFA_MAX", - "IFA_MULTICAST", - "IFA_ROUTE", - "IFA_UNSPEC", - "IFF_ALLMULTI", - "IFF_ALTPHYS", - "IFF_AUTOMEDIA", - "IFF_BROADCAST", - "IFF_CANTCHANGE", - "IFF_CANTCONFIG", - "IFF_DEBUG", - "IFF_DRV_OACTIVE", - "IFF_DRV_RUNNING", - "IFF_DYING", - "IFF_DYNAMIC", - "IFF_LINK0", - "IFF_LINK1", - "IFF_LINK2", - "IFF_LOOPBACK", - "IFF_MASTER", - "IFF_MONITOR", - "IFF_MULTICAST", - "IFF_NOARP", - "IFF_NOTRAILERS", - "IFF_NO_PI", - "IFF_OACTIVE", - "IFF_ONE_QUEUE", - "IFF_POINTOPOINT", - "IFF_POINTTOPOINT", - "IFF_PORTSEL", - "IFF_PPROMISC", - "IFF_PROMISC", - "IFF_RENAMING", - "IFF_RUNNING", - "IFF_SIMPLEX", - "IFF_SLAVE", - "IFF_SMART", - "IFF_STATICARP", - "IFF_TAP", - "IFF_TUN", - "IFF_TUN_EXCL", - "IFF_UP", - "IFF_VNET_HDR", - "IFLA_ADDRESS", - "IFLA_BROADCAST", - "IFLA_COST", - "IFLA_IFALIAS", - "IFLA_IFNAME", - "IFLA_LINK", - "IFLA_LINKINFO", - "IFLA_LINKMODE", - "IFLA_MAP", - "IFLA_MASTER", - "IFLA_MAX", - "IFLA_MTU", - "IFLA_NET_NS_PID", - "IFLA_OPERSTATE", - "IFLA_PRIORITY", - "IFLA_PROTINFO", - "IFLA_QDISC", - "IFLA_STATS", - "IFLA_TXQLEN", - "IFLA_UNSPEC", - "IFLA_WEIGHT", - "IFLA_WIRELESS", - "IFNAMSIZ", - "IFT_1822", - "IFT_A12MPPSWITCH", - "IFT_AAL2", - "IFT_AAL5", - "IFT_ADSL", - "IFT_AFLANE8023", - "IFT_AFLANE8025", - "IFT_ARAP", - "IFT_ARCNET", - "IFT_ARCNETPLUS", - "IFT_ASYNC", - "IFT_ATM", - "IFT_ATMDXI", - "IFT_ATMFUNI", - "IFT_ATMIMA", - "IFT_ATMLOGICAL", - "IFT_ATMRADIO", - "IFT_ATMSUBINTERFACE", - "IFT_ATMVCIENDPT", - "IFT_ATMVIRTUAL", - "IFT_BGPPOLICYACCOUNTING", - "IFT_BLUETOOTH", - "IFT_BRIDGE", - "IFT_BSC", - "IFT_CARP", - "IFT_CCTEMUL", - "IFT_CELLULAR", - "IFT_CEPT", - "IFT_CES", - "IFT_CHANNEL", - "IFT_CNR", - "IFT_COFFEE", - "IFT_COMPOSITELINK", - "IFT_DCN", - "IFT_DIGITALPOWERLINE", - "IFT_DIGITALWRAPPEROVERHEADCHANNEL", - "IFT_DLSW", - "IFT_DOCSCABLEDOWNSTREAM", - "IFT_DOCSCABLEMACLAYER", - "IFT_DOCSCABLEUPSTREAM", - "IFT_DOCSCABLEUPSTREAMCHANNEL", - "IFT_DS0", - "IFT_DS0BUNDLE", - "IFT_DS1FDL", - "IFT_DS3", - "IFT_DTM", - "IFT_DUMMY", - "IFT_DVBASILN", - "IFT_DVBASIOUT", - "IFT_DVBRCCDOWNSTREAM", - "IFT_DVBRCCMACLAYER", - "IFT_DVBRCCUPSTREAM", - "IFT_ECONET", - "IFT_ENC", - "IFT_EON", - "IFT_EPLRS", - "IFT_ESCON", - "IFT_ETHER", - "IFT_FAITH", - "IFT_FAST", - "IFT_FASTETHER", - "IFT_FASTETHERFX", - "IFT_FDDI", - "IFT_FIBRECHANNEL", - "IFT_FRAMERELAYINTERCONNECT", - "IFT_FRAMERELAYMPI", - "IFT_FRDLCIENDPT", - "IFT_FRELAY", - "IFT_FRELAYDCE", - "IFT_FRF16MFRBUNDLE", - "IFT_FRFORWARD", - "IFT_G703AT2MB", - "IFT_G703AT64K", - "IFT_GIF", - "IFT_GIGABITETHERNET", - "IFT_GR303IDT", - "IFT_GR303RDT", - "IFT_H323GATEKEEPER", - "IFT_H323PROXY", - "IFT_HDH1822", - "IFT_HDLC", - "IFT_HDSL2", - "IFT_HIPERLAN2", - "IFT_HIPPI", - "IFT_HIPPIINTERFACE", - "IFT_HOSTPAD", - "IFT_HSSI", - "IFT_HY", - "IFT_IBM370PARCHAN", - "IFT_IDSL", - "IFT_IEEE1394", - "IFT_IEEE80211", - "IFT_IEEE80212", - "IFT_IEEE8023ADLAG", - "IFT_IFGSN", - "IFT_IMT", - "IFT_INFINIBAND", - "IFT_INTERLEAVE", - "IFT_IP", - "IFT_IPFORWARD", - "IFT_IPOVERATM", - "IFT_IPOVERCDLC", - "IFT_IPOVERCLAW", - "IFT_IPSWITCH", - "IFT_IPXIP", - "IFT_ISDN", - "IFT_ISDNBASIC", - "IFT_ISDNPRIMARY", - "IFT_ISDNS", - "IFT_ISDNU", - "IFT_ISO88022LLC", - "IFT_ISO88023", - "IFT_ISO88024", - "IFT_ISO88025", - "IFT_ISO88025CRFPINT", - "IFT_ISO88025DTR", - "IFT_ISO88025FIBER", - "IFT_ISO88026", - "IFT_ISUP", - "IFT_L2VLAN", - "IFT_L3IPVLAN", - "IFT_L3IPXVLAN", - "IFT_LAPB", - "IFT_LAPD", - "IFT_LAPF", - "IFT_LINEGROUP", - "IFT_LOCALTALK", - "IFT_LOOP", - "IFT_MEDIAMAILOVERIP", - 
"IFT_MFSIGLINK", - "IFT_MIOX25", - "IFT_MODEM", - "IFT_MPC", - "IFT_MPLS", - "IFT_MPLSTUNNEL", - "IFT_MSDSL", - "IFT_MVL", - "IFT_MYRINET", - "IFT_NFAS", - "IFT_NSIP", - "IFT_OPTICALCHANNEL", - "IFT_OPTICALTRANSPORT", - "IFT_OTHER", - "IFT_P10", - "IFT_P80", - "IFT_PARA", - "IFT_PDP", - "IFT_PFLOG", - "IFT_PFLOW", - "IFT_PFSYNC", - "IFT_PLC", - "IFT_PON155", - "IFT_PON622", - "IFT_POS", - "IFT_PPP", - "IFT_PPPMULTILINKBUNDLE", - "IFT_PROPATM", - "IFT_PROPBWAP2MP", - "IFT_PROPCNLS", - "IFT_PROPDOCSWIRELESSDOWNSTREAM", - "IFT_PROPDOCSWIRELESSMACLAYER", - "IFT_PROPDOCSWIRELESSUPSTREAM", - "IFT_PROPMUX", - "IFT_PROPVIRTUAL", - "IFT_PROPWIRELESSP2P", - "IFT_PTPSERIAL", - "IFT_PVC", - "IFT_Q2931", - "IFT_QLLC", - "IFT_RADIOMAC", - "IFT_RADSL", - "IFT_REACHDSL", - "IFT_RFC1483", - "IFT_RS232", - "IFT_RSRB", - "IFT_SDLC", - "IFT_SDSL", - "IFT_SHDSL", - "IFT_SIP", - "IFT_SIPSIG", - "IFT_SIPTG", - "IFT_SLIP", - "IFT_SMDSDXI", - "IFT_SMDSICIP", - "IFT_SONET", - "IFT_SONETOVERHEADCHANNEL", - "IFT_SONETPATH", - "IFT_SONETVT", - "IFT_SRP", - "IFT_SS7SIGLINK", - "IFT_STACKTOSTACK", - "IFT_STARLAN", - "IFT_STF", - "IFT_T1", - "IFT_TDLC", - "IFT_TELINK", - "IFT_TERMPAD", - "IFT_TR008", - "IFT_TRANSPHDLC", - "IFT_TUNNEL", - "IFT_ULTRA", - "IFT_USB", - "IFT_V11", - "IFT_V35", - "IFT_V36", - "IFT_V37", - "IFT_VDSL", - "IFT_VIRTUALIPADDRESS", - "IFT_VIRTUALTG", - "IFT_VOICEDID", - "IFT_VOICEEM", - "IFT_VOICEEMFGD", - "IFT_VOICEENCAP", - "IFT_VOICEFGDEANA", - "IFT_VOICEFXO", - "IFT_VOICEFXS", - "IFT_VOICEOVERATM", - "IFT_VOICEOVERCABLE", - "IFT_VOICEOVERFRAMERELAY", - "IFT_VOICEOVERIP", - "IFT_X213", - "IFT_X25", - "IFT_X25DDN", - "IFT_X25HUNTGROUP", - "IFT_X25MLP", - "IFT_X25PLE", - "IFT_XETHER", - "IGNBRK", - "IGNCR", - "IGNORE", - "IGNPAR", - "IMAXBEL", - "INFINITE", - "INLCR", - "INPCK", - "INVALID_FILE_ATTRIBUTES", - "IN_ACCESS", - "IN_ALL_EVENTS", - "IN_ATTRIB", - "IN_CLASSA_HOST", - "IN_CLASSA_MAX", - "IN_CLASSA_NET", - "IN_CLASSA_NSHIFT", - "IN_CLASSB_HOST", - "IN_CLASSB_MAX", - "IN_CLASSB_NET", - "IN_CLASSB_NSHIFT", - "IN_CLASSC_HOST", - "IN_CLASSC_NET", - "IN_CLASSC_NSHIFT", - "IN_CLASSD_HOST", - "IN_CLASSD_NET", - "IN_CLASSD_NSHIFT", - "IN_CLOEXEC", - "IN_CLOSE", - "IN_CLOSE_NOWRITE", - "IN_CLOSE_WRITE", - "IN_CREATE", - "IN_DELETE", - "IN_DELETE_SELF", - "IN_DONT_FOLLOW", - "IN_EXCL_UNLINK", - "IN_IGNORED", - "IN_ISDIR", - "IN_LINKLOCALNETNUM", - "IN_LOOPBACKNET", - "IN_MASK_ADD", - "IN_MODIFY", - "IN_MOVE", - "IN_MOVED_FROM", - "IN_MOVED_TO", - "IN_MOVE_SELF", - "IN_NONBLOCK", - "IN_ONESHOT", - "IN_ONLYDIR", - "IN_OPEN", - "IN_Q_OVERFLOW", - "IN_RFC3021_HOST", - "IN_RFC3021_MASK", - "IN_RFC3021_NET", - "IN_RFC3021_NSHIFT", - "IN_UNMOUNT", - "IOC_IN", - "IOC_INOUT", - "IOC_OUT", - "IOC_VENDOR", - "IOC_WS2", - "IO_REPARSE_TAG_SYMLINK", - "IPMreq", - "IPMreqn", - "IPPROTO_3PC", - "IPPROTO_ADFS", - "IPPROTO_AH", - "IPPROTO_AHIP", - "IPPROTO_APES", - "IPPROTO_ARGUS", - "IPPROTO_AX25", - "IPPROTO_BHA", - "IPPROTO_BLT", - "IPPROTO_BRSATMON", - "IPPROTO_CARP", - "IPPROTO_CFTP", - "IPPROTO_CHAOS", - "IPPROTO_CMTP", - "IPPROTO_COMP", - "IPPROTO_CPHB", - "IPPROTO_CPNX", - "IPPROTO_DCCP", - "IPPROTO_DDP", - "IPPROTO_DGP", - "IPPROTO_DIVERT", - "IPPROTO_DIVERT_INIT", - "IPPROTO_DIVERT_RESP", - "IPPROTO_DONE", - "IPPROTO_DSTOPTS", - "IPPROTO_EGP", - "IPPROTO_EMCON", - "IPPROTO_ENCAP", - "IPPROTO_EON", - "IPPROTO_ESP", - "IPPROTO_ETHERIP", - "IPPROTO_FRAGMENT", - "IPPROTO_GGP", - "IPPROTO_GMTP", - "IPPROTO_GRE", - "IPPROTO_HELLO", - "IPPROTO_HMP", - "IPPROTO_HOPOPTS", - "IPPROTO_ICMP", - "IPPROTO_ICMPV6", - 
"IPPROTO_IDP", - "IPPROTO_IDPR", - "IPPROTO_IDRP", - "IPPROTO_IGMP", - "IPPROTO_IGP", - "IPPROTO_IGRP", - "IPPROTO_IL", - "IPPROTO_INLSP", - "IPPROTO_INP", - "IPPROTO_IP", - "IPPROTO_IPCOMP", - "IPPROTO_IPCV", - "IPPROTO_IPEIP", - "IPPROTO_IPIP", - "IPPROTO_IPPC", - "IPPROTO_IPV4", - "IPPROTO_IPV6", - "IPPROTO_IPV6_ICMP", - "IPPROTO_IRTP", - "IPPROTO_KRYPTOLAN", - "IPPROTO_LARP", - "IPPROTO_LEAF1", - "IPPROTO_LEAF2", - "IPPROTO_MAX", - "IPPROTO_MAXID", - "IPPROTO_MEAS", - "IPPROTO_MH", - "IPPROTO_MHRP", - "IPPROTO_MICP", - "IPPROTO_MOBILE", - "IPPROTO_MPLS", - "IPPROTO_MTP", - "IPPROTO_MUX", - "IPPROTO_ND", - "IPPROTO_NHRP", - "IPPROTO_NONE", - "IPPROTO_NSP", - "IPPROTO_NVPII", - "IPPROTO_OLD_DIVERT", - "IPPROTO_OSPFIGP", - "IPPROTO_PFSYNC", - "IPPROTO_PGM", - "IPPROTO_PIGP", - "IPPROTO_PIM", - "IPPROTO_PRM", - "IPPROTO_PUP", - "IPPROTO_PVP", - "IPPROTO_RAW", - "IPPROTO_RCCMON", - "IPPROTO_RDP", - "IPPROTO_ROUTING", - "IPPROTO_RSVP", - "IPPROTO_RVD", - "IPPROTO_SATEXPAK", - "IPPROTO_SATMON", - "IPPROTO_SCCSP", - "IPPROTO_SCTP", - "IPPROTO_SDRP", - "IPPROTO_SEND", - "IPPROTO_SEP", - "IPPROTO_SKIP", - "IPPROTO_SPACER", - "IPPROTO_SRPC", - "IPPROTO_ST", - "IPPROTO_SVMTP", - "IPPROTO_SWIPE", - "IPPROTO_TCF", - "IPPROTO_TCP", - "IPPROTO_TLSP", - "IPPROTO_TP", - "IPPROTO_TPXX", - "IPPROTO_TRUNK1", - "IPPROTO_TRUNK2", - "IPPROTO_TTP", - "IPPROTO_UDP", - "IPPROTO_UDPLITE", - "IPPROTO_VINES", - "IPPROTO_VISA", - "IPPROTO_VMTP", - "IPPROTO_VRRP", - "IPPROTO_WBEXPAK", - "IPPROTO_WBMON", - "IPPROTO_WSN", - "IPPROTO_XNET", - "IPPROTO_XTP", - "IPV6_2292DSTOPTS", - "IPV6_2292HOPLIMIT", - "IPV6_2292HOPOPTS", - "IPV6_2292NEXTHOP", - "IPV6_2292PKTINFO", - "IPV6_2292PKTOPTIONS", - "IPV6_2292RTHDR", - "IPV6_ADDRFORM", - "IPV6_ADD_MEMBERSHIP", - "IPV6_AUTHHDR", - "IPV6_AUTH_LEVEL", - "IPV6_AUTOFLOWLABEL", - "IPV6_BINDANY", - "IPV6_BINDV6ONLY", - "IPV6_BOUND_IF", - "IPV6_CHECKSUM", - "IPV6_DEFAULT_MULTICAST_HOPS", - "IPV6_DEFAULT_MULTICAST_LOOP", - "IPV6_DEFHLIM", - "IPV6_DONTFRAG", - "IPV6_DROP_MEMBERSHIP", - "IPV6_DSTOPTS", - "IPV6_ESP_NETWORK_LEVEL", - "IPV6_ESP_TRANS_LEVEL", - "IPV6_FAITH", - "IPV6_FLOWINFO_MASK", - "IPV6_FLOWLABEL_MASK", - "IPV6_FRAGTTL", - "IPV6_FW_ADD", - "IPV6_FW_DEL", - "IPV6_FW_FLUSH", - "IPV6_FW_GET", - "IPV6_FW_ZERO", - "IPV6_HLIMDEC", - "IPV6_HOPLIMIT", - "IPV6_HOPOPTS", - "IPV6_IPCOMP_LEVEL", - "IPV6_IPSEC_POLICY", - "IPV6_JOIN_ANYCAST", - "IPV6_JOIN_GROUP", - "IPV6_LEAVE_ANYCAST", - "IPV6_LEAVE_GROUP", - "IPV6_MAXHLIM", - "IPV6_MAXOPTHDR", - "IPV6_MAXPACKET", - "IPV6_MAX_GROUP_SRC_FILTER", - "IPV6_MAX_MEMBERSHIPS", - "IPV6_MAX_SOCK_SRC_FILTER", - "IPV6_MIN_MEMBERSHIPS", - "IPV6_MMTU", - "IPV6_MSFILTER", - "IPV6_MTU", - "IPV6_MTU_DISCOVER", - "IPV6_MULTICAST_HOPS", - "IPV6_MULTICAST_IF", - "IPV6_MULTICAST_LOOP", - "IPV6_NEXTHOP", - "IPV6_OPTIONS", - "IPV6_PATHMTU", - "IPV6_PIPEX", - "IPV6_PKTINFO", - "IPV6_PMTUDISC_DO", - "IPV6_PMTUDISC_DONT", - "IPV6_PMTUDISC_PROBE", - "IPV6_PMTUDISC_WANT", - "IPV6_PORTRANGE", - "IPV6_PORTRANGE_DEFAULT", - "IPV6_PORTRANGE_HIGH", - "IPV6_PORTRANGE_LOW", - "IPV6_PREFER_TEMPADDR", - "IPV6_RECVDSTOPTS", - "IPV6_RECVDSTPORT", - "IPV6_RECVERR", - "IPV6_RECVHOPLIMIT", - "IPV6_RECVHOPOPTS", - "IPV6_RECVPATHMTU", - "IPV6_RECVPKTINFO", - "IPV6_RECVRTHDR", - "IPV6_RECVTCLASS", - "IPV6_ROUTER_ALERT", - "IPV6_RTABLE", - "IPV6_RTHDR", - "IPV6_RTHDRDSTOPTS", - "IPV6_RTHDR_LOOSE", - "IPV6_RTHDR_STRICT", - "IPV6_RTHDR_TYPE_0", - "IPV6_RXDSTOPTS", - "IPV6_RXHOPOPTS", - "IPV6_SOCKOPT_RESERVED1", - "IPV6_TCLASS", - "IPV6_UNICAST_HOPS", - "IPV6_USE_MIN_MTU", - 
"IPV6_V6ONLY", - "IPV6_VERSION", - "IPV6_VERSION_MASK", - "IPV6_XFRM_POLICY", - "IP_ADD_MEMBERSHIP", - "IP_ADD_SOURCE_MEMBERSHIP", - "IP_AUTH_LEVEL", - "IP_BINDANY", - "IP_BLOCK_SOURCE", - "IP_BOUND_IF", - "IP_DEFAULT_MULTICAST_LOOP", - "IP_DEFAULT_MULTICAST_TTL", - "IP_DF", - "IP_DIVERTFL", - "IP_DONTFRAG", - "IP_DROP_MEMBERSHIP", - "IP_DROP_SOURCE_MEMBERSHIP", - "IP_DUMMYNET3", - "IP_DUMMYNET_CONFIGURE", - "IP_DUMMYNET_DEL", - "IP_DUMMYNET_FLUSH", - "IP_DUMMYNET_GET", - "IP_EF", - "IP_ERRORMTU", - "IP_ESP_NETWORK_LEVEL", - "IP_ESP_TRANS_LEVEL", - "IP_FAITH", - "IP_FREEBIND", - "IP_FW3", - "IP_FW_ADD", - "IP_FW_DEL", - "IP_FW_FLUSH", - "IP_FW_GET", - "IP_FW_NAT_CFG", - "IP_FW_NAT_DEL", - "IP_FW_NAT_GET_CONFIG", - "IP_FW_NAT_GET_LOG", - "IP_FW_RESETLOG", - "IP_FW_TABLE_ADD", - "IP_FW_TABLE_DEL", - "IP_FW_TABLE_FLUSH", - "IP_FW_TABLE_GETSIZE", - "IP_FW_TABLE_LIST", - "IP_FW_ZERO", - "IP_HDRINCL", - "IP_IPCOMP_LEVEL", - "IP_IPSECFLOWINFO", - "IP_IPSEC_LOCAL_AUTH", - "IP_IPSEC_LOCAL_CRED", - "IP_IPSEC_LOCAL_ID", - "IP_IPSEC_POLICY", - "IP_IPSEC_REMOTE_AUTH", - "IP_IPSEC_REMOTE_CRED", - "IP_IPSEC_REMOTE_ID", - "IP_MAXPACKET", - "IP_MAX_GROUP_SRC_FILTER", - "IP_MAX_MEMBERSHIPS", - "IP_MAX_SOCK_MUTE_FILTER", - "IP_MAX_SOCK_SRC_FILTER", - "IP_MAX_SOURCE_FILTER", - "IP_MF", - "IP_MINFRAGSIZE", - "IP_MINTTL", - "IP_MIN_MEMBERSHIPS", - "IP_MSFILTER", - "IP_MSS", - "IP_MTU", - "IP_MTU_DISCOVER", - "IP_MULTICAST_IF", - "IP_MULTICAST_IFINDEX", - "IP_MULTICAST_LOOP", - "IP_MULTICAST_TTL", - "IP_MULTICAST_VIF", - "IP_NAT__XXX", - "IP_OFFMASK", - "IP_OLD_FW_ADD", - "IP_OLD_FW_DEL", - "IP_OLD_FW_FLUSH", - "IP_OLD_FW_GET", - "IP_OLD_FW_RESETLOG", - "IP_OLD_FW_ZERO", - "IP_ONESBCAST", - "IP_OPTIONS", - "IP_ORIGDSTADDR", - "IP_PASSSEC", - "IP_PIPEX", - "IP_PKTINFO", - "IP_PKTOPTIONS", - "IP_PMTUDISC", - "IP_PMTUDISC_DO", - "IP_PMTUDISC_DONT", - "IP_PMTUDISC_PROBE", - "IP_PMTUDISC_WANT", - "IP_PORTRANGE", - "IP_PORTRANGE_DEFAULT", - "IP_PORTRANGE_HIGH", - "IP_PORTRANGE_LOW", - "IP_RECVDSTADDR", - "IP_RECVDSTPORT", - "IP_RECVERR", - "IP_RECVIF", - "IP_RECVOPTS", - "IP_RECVORIGDSTADDR", - "IP_RECVPKTINFO", - "IP_RECVRETOPTS", - "IP_RECVRTABLE", - "IP_RECVTOS", - "IP_RECVTTL", - "IP_RETOPTS", - "IP_RF", - "IP_ROUTER_ALERT", - "IP_RSVP_OFF", - "IP_RSVP_ON", - "IP_RSVP_VIF_OFF", - "IP_RSVP_VIF_ON", - "IP_RTABLE", - "IP_SENDSRCADDR", - "IP_STRIPHDR", - "IP_TOS", - "IP_TRAFFIC_MGT_BACKGROUND", - "IP_TRANSPARENT", - "IP_TTL", - "IP_UNBLOCK_SOURCE", - "IP_XFRM_POLICY", - "IPv6MTUInfo", - "IPv6Mreq", - "ISIG", - "ISTRIP", - "IUCLC", - "IUTF8", - "IXANY", - "IXOFF", - "IXON", - "IfAddrmsg", - "IfAnnounceMsghdr", - "IfData", - "IfInfomsg", - "IfMsghdr", - "IfaMsghdr", - "IfmaMsghdr", - "IfmaMsghdr2", - "ImplementsGetwd", - "Inet4Pktinfo", - "Inet6Pktinfo", - "InotifyAddWatch", - "InotifyEvent", - "InotifyInit", - "InotifyInit1", - "InotifyRmWatch", - "InterfaceAddrMessage", - "InterfaceAnnounceMessage", - "InterfaceInfo", - "InterfaceMessage", - "InterfaceMulticastAddrMessage", - "InvalidHandle", - "Ioperm", - "Iopl", - "Iovec", - "IpAdapterInfo", - "IpAddrString", - "IpAddressString", - "IpMaskString", - "Issetugid", - "KEY_ALL_ACCESS", - "KEY_CREATE_LINK", - "KEY_CREATE_SUB_KEY", - "KEY_ENUMERATE_SUB_KEYS", - "KEY_EXECUTE", - "KEY_NOTIFY", - "KEY_QUERY_VALUE", - "KEY_READ", - "KEY_SET_VALUE", - "KEY_WOW64_32KEY", - "KEY_WOW64_64KEY", - "KEY_WRITE", - "Kevent", - "Kevent_t", - "Kill", - "Klogctl", - "Kqueue", - "LANG_ENGLISH", - "LAYERED_PROTOCOL", - "LCNT_OVERLOAD_FLUSH", - "LINUX_REBOOT_CMD_CAD_OFF", - 
"LINUX_REBOOT_CMD_CAD_ON", - "LINUX_REBOOT_CMD_HALT", - "LINUX_REBOOT_CMD_KEXEC", - "LINUX_REBOOT_CMD_POWER_OFF", - "LINUX_REBOOT_CMD_RESTART", - "LINUX_REBOOT_CMD_RESTART2", - "LINUX_REBOOT_CMD_SW_SUSPEND", - "LINUX_REBOOT_MAGIC1", - "LINUX_REBOOT_MAGIC2", - "LOCK_EX", - "LOCK_NB", - "LOCK_SH", - "LOCK_UN", - "LazyDLL", - "LazyProc", - "Lchown", - "Linger", - "Link", - "Listen", - "Listxattr", - "LoadCancelIoEx", - "LoadConnectEx", - "LoadCreateSymbolicLink", - "LoadDLL", - "LoadGetAddrInfo", - "LoadLibrary", - "LoadSetFileCompletionNotificationModes", - "LocalFree", - "Log2phys_t", - "LookupAccountName", - "LookupAccountSid", - "LookupSID", - "LsfJump", - "LsfSocket", - "LsfStmt", - "Lstat", - "MADV_AUTOSYNC", - "MADV_CAN_REUSE", - "MADV_CORE", - "MADV_DOFORK", - "MADV_DONTFORK", - "MADV_DONTNEED", - "MADV_FREE", - "MADV_FREE_REUSABLE", - "MADV_FREE_REUSE", - "MADV_HUGEPAGE", - "MADV_HWPOISON", - "MADV_MERGEABLE", - "MADV_NOCORE", - "MADV_NOHUGEPAGE", - "MADV_NORMAL", - "MADV_NOSYNC", - "MADV_PROTECT", - "MADV_RANDOM", - "MADV_REMOVE", - "MADV_SEQUENTIAL", - "MADV_SPACEAVAIL", - "MADV_UNMERGEABLE", - "MADV_WILLNEED", - "MADV_ZERO_WIRED_PAGES", - "MAP_32BIT", - "MAP_ALIGNED_SUPER", - "MAP_ALIGNMENT_16MB", - "MAP_ALIGNMENT_1TB", - "MAP_ALIGNMENT_256TB", - "MAP_ALIGNMENT_4GB", - "MAP_ALIGNMENT_64KB", - "MAP_ALIGNMENT_64PB", - "MAP_ALIGNMENT_MASK", - "MAP_ALIGNMENT_SHIFT", - "MAP_ANON", - "MAP_ANONYMOUS", - "MAP_COPY", - "MAP_DENYWRITE", - "MAP_EXECUTABLE", - "MAP_FILE", - "MAP_FIXED", - "MAP_FLAGMASK", - "MAP_GROWSDOWN", - "MAP_HASSEMAPHORE", - "MAP_HUGETLB", - "MAP_INHERIT", - "MAP_INHERIT_COPY", - "MAP_INHERIT_DEFAULT", - "MAP_INHERIT_DONATE_COPY", - "MAP_INHERIT_NONE", - "MAP_INHERIT_SHARE", - "MAP_JIT", - "MAP_LOCKED", - "MAP_NOCACHE", - "MAP_NOCORE", - "MAP_NOEXTEND", - "MAP_NONBLOCK", - "MAP_NORESERVE", - "MAP_NOSYNC", - "MAP_POPULATE", - "MAP_PREFAULT_READ", - "MAP_PRIVATE", - "MAP_RENAME", - "MAP_RESERVED0080", - "MAP_RESERVED0100", - "MAP_SHARED", - "MAP_STACK", - "MAP_TRYFIXED", - "MAP_TYPE", - "MAP_WIRED", - "MAXIMUM_REPARSE_DATA_BUFFER_SIZE", - "MAXLEN_IFDESCR", - "MAXLEN_PHYSADDR", - "MAX_ADAPTER_ADDRESS_LENGTH", - "MAX_ADAPTER_DESCRIPTION_LENGTH", - "MAX_ADAPTER_NAME_LENGTH", - "MAX_COMPUTERNAME_LENGTH", - "MAX_INTERFACE_NAME_LEN", - "MAX_LONG_PATH", - "MAX_PATH", - "MAX_PROTOCOL_CHAIN", - "MCL_CURRENT", - "MCL_FUTURE", - "MNT_DETACH", - "MNT_EXPIRE", - "MNT_FORCE", - "MSG_BCAST", - "MSG_CMSG_CLOEXEC", - "MSG_COMPAT", - "MSG_CONFIRM", - "MSG_CONTROLMBUF", - "MSG_CTRUNC", - "MSG_DONTROUTE", - "MSG_DONTWAIT", - "MSG_EOF", - "MSG_EOR", - "MSG_ERRQUEUE", - "MSG_FASTOPEN", - "MSG_FIN", - "MSG_FLUSH", - "MSG_HAVEMORE", - "MSG_HOLD", - "MSG_IOVUSRSPACE", - "MSG_LENUSRSPACE", - "MSG_MCAST", - "MSG_MORE", - "MSG_NAMEMBUF", - "MSG_NBIO", - "MSG_NEEDSA", - "MSG_NOSIGNAL", - "MSG_NOTIFICATION", - "MSG_OOB", - "MSG_PEEK", - "MSG_PROXY", - "MSG_RCVMORE", - "MSG_RST", - "MSG_SEND", - "MSG_SYN", - "MSG_TRUNC", - "MSG_TRYHARD", - "MSG_USERFLAGS", - "MSG_WAITALL", - "MSG_WAITFORONE", - "MSG_WAITSTREAM", - "MS_ACTIVE", - "MS_ASYNC", - "MS_BIND", - "MS_DEACTIVATE", - "MS_DIRSYNC", - "MS_INVALIDATE", - "MS_I_VERSION", - "MS_KERNMOUNT", - "MS_KILLPAGES", - "MS_MANDLOCK", - "MS_MGC_MSK", - "MS_MGC_VAL", - "MS_MOVE", - "MS_NOATIME", - "MS_NODEV", - "MS_NODIRATIME", - "MS_NOEXEC", - "MS_NOSUID", - "MS_NOUSER", - "MS_POSIXACL", - "MS_PRIVATE", - "MS_RDONLY", - "MS_REC", - "MS_RELATIME", - "MS_REMOUNT", - "MS_RMT_MASK", - "MS_SHARED", - "MS_SILENT", - "MS_SLAVE", - "MS_STRICTATIME", - "MS_SYNC", - 
"MS_SYNCHRONOUS", - "MS_UNBINDABLE", - "Madvise", - "MapViewOfFile", - "MaxTokenInfoClass", - "Mclpool", - "MibIfRow", - "Mkdir", - "Mkdirat", - "Mkfifo", - "Mknod", - "Mknodat", - "Mlock", - "Mlockall", - "Mmap", - "Mount", - "MoveFile", - "Mprotect", - "Msghdr", - "Munlock", - "Munlockall", - "Munmap", - "MustLoadDLL", - "NAME_MAX", - "NETLINK_ADD_MEMBERSHIP", - "NETLINK_AUDIT", - "NETLINK_BROADCAST_ERROR", - "NETLINK_CONNECTOR", - "NETLINK_DNRTMSG", - "NETLINK_DROP_MEMBERSHIP", - "NETLINK_ECRYPTFS", - "NETLINK_FIB_LOOKUP", - "NETLINK_FIREWALL", - "NETLINK_GENERIC", - "NETLINK_INET_DIAG", - "NETLINK_IP6_FW", - "NETLINK_ISCSI", - "NETLINK_KOBJECT_UEVENT", - "NETLINK_NETFILTER", - "NETLINK_NFLOG", - "NETLINK_NO_ENOBUFS", - "NETLINK_PKTINFO", - "NETLINK_RDMA", - "NETLINK_ROUTE", - "NETLINK_SCSITRANSPORT", - "NETLINK_SELINUX", - "NETLINK_UNUSED", - "NETLINK_USERSOCK", - "NETLINK_XFRM", - "NET_RT_DUMP", - "NET_RT_DUMP2", - "NET_RT_FLAGS", - "NET_RT_IFLIST", - "NET_RT_IFLIST2", - "NET_RT_IFLISTL", - "NET_RT_IFMALIST", - "NET_RT_MAXID", - "NET_RT_OIFLIST", - "NET_RT_OOIFLIST", - "NET_RT_STAT", - "NET_RT_STATS", - "NET_RT_TABLE", - "NET_RT_TRASH", - "NLA_ALIGNTO", - "NLA_F_NESTED", - "NLA_F_NET_BYTEORDER", - "NLA_HDRLEN", - "NLMSG_ALIGNTO", - "NLMSG_DONE", - "NLMSG_ERROR", - "NLMSG_HDRLEN", - "NLMSG_MIN_TYPE", - "NLMSG_NOOP", - "NLMSG_OVERRUN", - "NLM_F_ACK", - "NLM_F_APPEND", - "NLM_F_ATOMIC", - "NLM_F_CREATE", - "NLM_F_DUMP", - "NLM_F_ECHO", - "NLM_F_EXCL", - "NLM_F_MATCH", - "NLM_F_MULTI", - "NLM_F_REPLACE", - "NLM_F_REQUEST", - "NLM_F_ROOT", - "NOFLSH", - "NOTE_ABSOLUTE", - "NOTE_ATTRIB", - "NOTE_BACKGROUND", - "NOTE_CHILD", - "NOTE_CRITICAL", - "NOTE_DELETE", - "NOTE_EOF", - "NOTE_EXEC", - "NOTE_EXIT", - "NOTE_EXITSTATUS", - "NOTE_EXIT_CSERROR", - "NOTE_EXIT_DECRYPTFAIL", - "NOTE_EXIT_DETAIL", - "NOTE_EXIT_DETAIL_MASK", - "NOTE_EXIT_MEMORY", - "NOTE_EXIT_REPARENTED", - "NOTE_EXTEND", - "NOTE_FFAND", - "NOTE_FFCOPY", - "NOTE_FFCTRLMASK", - "NOTE_FFLAGSMASK", - "NOTE_FFNOP", - "NOTE_FFOR", - "NOTE_FORK", - "NOTE_LEEWAY", - "NOTE_LINK", - "NOTE_LOWAT", - "NOTE_NONE", - "NOTE_NSECONDS", - "NOTE_PCTRLMASK", - "NOTE_PDATAMASK", - "NOTE_REAP", - "NOTE_RENAME", - "NOTE_RESOURCEEND", - "NOTE_REVOKE", - "NOTE_SECONDS", - "NOTE_SIGNAL", - "NOTE_TRACK", - "NOTE_TRACKERR", - "NOTE_TRIGGER", - "NOTE_TRUNCATE", - "NOTE_USECONDS", - "NOTE_VM_ERROR", - "NOTE_VM_PRESSURE", - "NOTE_VM_PRESSURE_SUDDEN_TERMINATE", - "NOTE_VM_PRESSURE_TERMINATE", - "NOTE_WRITE", - "NameCanonical", - "NameCanonicalEx", - "NameDisplay", - "NameDnsDomain", - "NameFullyQualifiedDN", - "NameSamCompatible", - "NameServicePrincipal", - "NameUniqueId", - "NameUnknown", - "NameUserPrincipal", - "Nanosleep", - "NetApiBufferFree", - "NetGetJoinInformation", - "NetSetupDomainName", - "NetSetupUnjoined", - "NetSetupUnknownStatus", - "NetSetupWorkgroupName", - "NetUserGetInfo", - "NetlinkMessage", - "NetlinkRIB", - "NetlinkRouteAttr", - "NetlinkRouteRequest", - "NewCallback", - "NewCallbackCDecl", - "NewLazyDLL", - "NlAttr", - "NlMsgerr", - "NlMsghdr", - "NsecToFiletime", - "NsecToTimespec", - "NsecToTimeval", - "Ntohs", - "OCRNL", - "OFDEL", - "OFILL", - "OFIOGETBMAP", - "OID_PKIX_KP_SERVER_AUTH", - "OID_SERVER_GATED_CRYPTO", - "OID_SGC_NETSCAPE", - "OLCUC", - "ONLCR", - "ONLRET", - "ONOCR", - "ONOEOT", - "OPEN_ALWAYS", - "OPEN_EXISTING", - "OPOST", - "O_ACCMODE", - "O_ALERT", - "O_ALT_IO", - "O_APPEND", - "O_ASYNC", - "O_CLOEXEC", - "O_CREAT", - "O_DIRECT", - "O_DIRECTORY", - "O_DP_GETRAWENCRYPTED", - "O_DSYNC", - "O_EVTONLY", - "O_EXCL", - 
"O_EXEC", - "O_EXLOCK", - "O_FSYNC", - "O_LARGEFILE", - "O_NDELAY", - "O_NOATIME", - "O_NOCTTY", - "O_NOFOLLOW", - "O_NONBLOCK", - "O_NOSIGPIPE", - "O_POPUP", - "O_RDONLY", - "O_RDWR", - "O_RSYNC", - "O_SHLOCK", - "O_SYMLINK", - "O_SYNC", - "O_TRUNC", - "O_TTY_INIT", - "O_WRONLY", - "Open", - "OpenCurrentProcessToken", - "OpenProcess", - "OpenProcessToken", - "Openat", - "Overlapped", - "PACKET_ADD_MEMBERSHIP", - "PACKET_BROADCAST", - "PACKET_DROP_MEMBERSHIP", - "PACKET_FASTROUTE", - "PACKET_HOST", - "PACKET_LOOPBACK", - "PACKET_MR_ALLMULTI", - "PACKET_MR_MULTICAST", - "PACKET_MR_PROMISC", - "PACKET_MULTICAST", - "PACKET_OTHERHOST", - "PACKET_OUTGOING", - "PACKET_RECV_OUTPUT", - "PACKET_RX_RING", - "PACKET_STATISTICS", - "PAGE_EXECUTE_READ", - "PAGE_EXECUTE_READWRITE", - "PAGE_EXECUTE_WRITECOPY", - "PAGE_READONLY", - "PAGE_READWRITE", - "PAGE_WRITECOPY", - "PARENB", - "PARMRK", - "PARODD", - "PENDIN", - "PFL_HIDDEN", - "PFL_MATCHES_PROTOCOL_ZERO", - "PFL_MULTIPLE_PROTO_ENTRIES", - "PFL_NETWORKDIRECT_PROVIDER", - "PFL_RECOMMENDED_PROTO_ENTRY", - "PF_FLUSH", - "PKCS_7_ASN_ENCODING", - "PMC5_PIPELINE_FLUSH", - "PRIO_PGRP", - "PRIO_PROCESS", - "PRIO_USER", - "PRI_IOFLUSH", - "PROCESS_QUERY_INFORMATION", - "PROCESS_TERMINATE", - "PROT_EXEC", - "PROT_GROWSDOWN", - "PROT_GROWSUP", - "PROT_NONE", - "PROT_READ", - "PROT_WRITE", - "PROV_DH_SCHANNEL", - "PROV_DSS", - "PROV_DSS_DH", - "PROV_EC_ECDSA_FULL", - "PROV_EC_ECDSA_SIG", - "PROV_EC_ECNRA_FULL", - "PROV_EC_ECNRA_SIG", - "PROV_FORTEZZA", - "PROV_INTEL_SEC", - "PROV_MS_EXCHANGE", - "PROV_REPLACE_OWF", - "PROV_RNG", - "PROV_RSA_AES", - "PROV_RSA_FULL", - "PROV_RSA_SCHANNEL", - "PROV_RSA_SIG", - "PROV_SPYRUS_LYNKS", - "PROV_SSL", - "PR_CAPBSET_DROP", - "PR_CAPBSET_READ", - "PR_CLEAR_SECCOMP_FILTER", - "PR_ENDIAN_BIG", - "PR_ENDIAN_LITTLE", - "PR_ENDIAN_PPC_LITTLE", - "PR_FPEMU_NOPRINT", - "PR_FPEMU_SIGFPE", - "PR_FP_EXC_ASYNC", - "PR_FP_EXC_DISABLED", - "PR_FP_EXC_DIV", - "PR_FP_EXC_INV", - "PR_FP_EXC_NONRECOV", - "PR_FP_EXC_OVF", - "PR_FP_EXC_PRECISE", - "PR_FP_EXC_RES", - "PR_FP_EXC_SW_ENABLE", - "PR_FP_EXC_UND", - "PR_GET_DUMPABLE", - "PR_GET_ENDIAN", - "PR_GET_FPEMU", - "PR_GET_FPEXC", - "PR_GET_KEEPCAPS", - "PR_GET_NAME", - "PR_GET_PDEATHSIG", - "PR_GET_SECCOMP", - "PR_GET_SECCOMP_FILTER", - "PR_GET_SECUREBITS", - "PR_GET_TIMERSLACK", - "PR_GET_TIMING", - "PR_GET_TSC", - "PR_GET_UNALIGN", - "PR_MCE_KILL", - "PR_MCE_KILL_CLEAR", - "PR_MCE_KILL_DEFAULT", - "PR_MCE_KILL_EARLY", - "PR_MCE_KILL_GET", - "PR_MCE_KILL_LATE", - "PR_MCE_KILL_SET", - "PR_SECCOMP_FILTER_EVENT", - "PR_SECCOMP_FILTER_SYSCALL", - "PR_SET_DUMPABLE", - "PR_SET_ENDIAN", - "PR_SET_FPEMU", - "PR_SET_FPEXC", - "PR_SET_KEEPCAPS", - "PR_SET_NAME", - "PR_SET_PDEATHSIG", - "PR_SET_PTRACER", - "PR_SET_SECCOMP", - "PR_SET_SECCOMP_FILTER", - "PR_SET_SECUREBITS", - "PR_SET_TIMERSLACK", - "PR_SET_TIMING", - "PR_SET_TSC", - "PR_SET_UNALIGN", - "PR_TASK_PERF_EVENTS_DISABLE", - "PR_TASK_PERF_EVENTS_ENABLE", - "PR_TIMING_STATISTICAL", - "PR_TIMING_TIMESTAMP", - "PR_TSC_ENABLE", - "PR_TSC_SIGSEGV", - "PR_UNALIGN_NOPRINT", - "PR_UNALIGN_SIGBUS", - "PTRACE_ARCH_PRCTL", - "PTRACE_ATTACH", - "PTRACE_CONT", - "PTRACE_DETACH", - "PTRACE_EVENT_CLONE", - "PTRACE_EVENT_EXEC", - "PTRACE_EVENT_EXIT", - "PTRACE_EVENT_FORK", - "PTRACE_EVENT_VFORK", - "PTRACE_EVENT_VFORK_DONE", - "PTRACE_GETCRUNCHREGS", - "PTRACE_GETEVENTMSG", - "PTRACE_GETFPREGS", - "PTRACE_GETFPXREGS", - "PTRACE_GETHBPREGS", - "PTRACE_GETREGS", - "PTRACE_GETREGSET", - "PTRACE_GETSIGINFO", - "PTRACE_GETVFPREGS", - "PTRACE_GETWMMXREGS", - 
"PTRACE_GET_THREAD_AREA", - "PTRACE_KILL", - "PTRACE_OLDSETOPTIONS", - "PTRACE_O_MASK", - "PTRACE_O_TRACECLONE", - "PTRACE_O_TRACEEXEC", - "PTRACE_O_TRACEEXIT", - "PTRACE_O_TRACEFORK", - "PTRACE_O_TRACESYSGOOD", - "PTRACE_O_TRACEVFORK", - "PTRACE_O_TRACEVFORKDONE", - "PTRACE_PEEKDATA", - "PTRACE_PEEKTEXT", - "PTRACE_PEEKUSR", - "PTRACE_POKEDATA", - "PTRACE_POKETEXT", - "PTRACE_POKEUSR", - "PTRACE_SETCRUNCHREGS", - "PTRACE_SETFPREGS", - "PTRACE_SETFPXREGS", - "PTRACE_SETHBPREGS", - "PTRACE_SETOPTIONS", - "PTRACE_SETREGS", - "PTRACE_SETREGSET", - "PTRACE_SETSIGINFO", - "PTRACE_SETVFPREGS", - "PTRACE_SETWMMXREGS", - "PTRACE_SET_SYSCALL", - "PTRACE_SET_THREAD_AREA", - "PTRACE_SINGLEBLOCK", - "PTRACE_SINGLESTEP", - "PTRACE_SYSCALL", - "PTRACE_SYSEMU", - "PTRACE_SYSEMU_SINGLESTEP", - "PTRACE_TRACEME", - "PT_ATTACH", - "PT_ATTACHEXC", - "PT_CONTINUE", - "PT_DATA_ADDR", - "PT_DENY_ATTACH", - "PT_DETACH", - "PT_FIRSTMACH", - "PT_FORCEQUOTA", - "PT_KILL", - "PT_MASK", - "PT_READ_D", - "PT_READ_I", - "PT_READ_U", - "PT_SIGEXC", - "PT_STEP", - "PT_TEXT_ADDR", - "PT_TEXT_END_ADDR", - "PT_THUPDATE", - "PT_TRACE_ME", - "PT_WRITE_D", - "PT_WRITE_I", - "PT_WRITE_U", - "ParseDirent", - "ParseNetlinkMessage", - "ParseNetlinkRouteAttr", - "ParseRoutingMessage", - "ParseRoutingSockaddr", - "ParseSocketControlMessage", - "ParseUnixCredentials", - "ParseUnixRights", - "PathMax", - "Pathconf", - "Pause", - "Pipe", - "Pipe2", - "PivotRoot", - "Pointer", - "PostQueuedCompletionStatus", - "Pread", - "Proc", - "ProcAttr", - "Process32First", - "Process32Next", - "ProcessEntry32", - "ProcessInformation", - "Protoent", - "PtraceAttach", - "PtraceCont", - "PtraceDetach", - "PtraceGetEventMsg", - "PtraceGetRegs", - "PtracePeekData", - "PtracePeekText", - "PtracePokeData", - "PtracePokeText", - "PtraceRegs", - "PtraceSetOptions", - "PtraceSetRegs", - "PtraceSingleStep", - "PtraceSyscall", - "Pwrite", - "REG_BINARY", - "REG_DWORD", - "REG_DWORD_BIG_ENDIAN", - "REG_DWORD_LITTLE_ENDIAN", - "REG_EXPAND_SZ", - "REG_FULL_RESOURCE_DESCRIPTOR", - "REG_LINK", - "REG_MULTI_SZ", - "REG_NONE", - "REG_QWORD", - "REG_QWORD_LITTLE_ENDIAN", - "REG_RESOURCE_LIST", - "REG_RESOURCE_REQUIREMENTS_LIST", - "REG_SZ", - "RLIMIT_AS", - "RLIMIT_CORE", - "RLIMIT_CPU", - "RLIMIT_CPU_USAGE_MONITOR", - "RLIMIT_DATA", - "RLIMIT_FSIZE", - "RLIMIT_NOFILE", - "RLIMIT_STACK", - "RLIM_INFINITY", - "RTAX_ADVMSS", - "RTAX_AUTHOR", - "RTAX_BRD", - "RTAX_CWND", - "RTAX_DST", - "RTAX_FEATURES", - "RTAX_FEATURE_ALLFRAG", - "RTAX_FEATURE_ECN", - "RTAX_FEATURE_SACK", - "RTAX_FEATURE_TIMESTAMP", - "RTAX_GATEWAY", - "RTAX_GENMASK", - "RTAX_HOPLIMIT", - "RTAX_IFA", - "RTAX_IFP", - "RTAX_INITCWND", - "RTAX_INITRWND", - "RTAX_LABEL", - "RTAX_LOCK", - "RTAX_MAX", - "RTAX_MTU", - "RTAX_NETMASK", - "RTAX_REORDERING", - "RTAX_RTO_MIN", - "RTAX_RTT", - "RTAX_RTTVAR", - "RTAX_SRC", - "RTAX_SRCMASK", - "RTAX_SSTHRESH", - "RTAX_TAG", - "RTAX_UNSPEC", - "RTAX_WINDOW", - "RTA_ALIGNTO", - "RTA_AUTHOR", - "RTA_BRD", - "RTA_CACHEINFO", - "RTA_DST", - "RTA_FLOW", - "RTA_GATEWAY", - "RTA_GENMASK", - "RTA_IFA", - "RTA_IFP", - "RTA_IIF", - "RTA_LABEL", - "RTA_MAX", - "RTA_METRICS", - "RTA_MULTIPATH", - "RTA_NETMASK", - "RTA_OIF", - "RTA_PREFSRC", - "RTA_PRIORITY", - "RTA_SRC", - "RTA_SRCMASK", - "RTA_TABLE", - "RTA_TAG", - "RTA_UNSPEC", - "RTCF_DIRECTSRC", - "RTCF_DOREDIRECT", - "RTCF_LOG", - "RTCF_MASQ", - "RTCF_NAT", - "RTCF_VALVE", - "RTF_ADDRCLASSMASK", - "RTF_ADDRCONF", - "RTF_ALLONLINK", - "RTF_ANNOUNCE", - "RTF_BLACKHOLE", - "RTF_BROADCAST", - "RTF_CACHE", - "RTF_CLONED", - 
"RTF_CLONING", - "RTF_CONDEMNED", - "RTF_DEFAULT", - "RTF_DELCLONE", - "RTF_DONE", - "RTF_DYNAMIC", - "RTF_FLOW", - "RTF_FMASK", - "RTF_GATEWAY", - "RTF_GWFLAG_COMPAT", - "RTF_HOST", - "RTF_IFREF", - "RTF_IFSCOPE", - "RTF_INTERFACE", - "RTF_IRTT", - "RTF_LINKRT", - "RTF_LLDATA", - "RTF_LLINFO", - "RTF_LOCAL", - "RTF_MASK", - "RTF_MODIFIED", - "RTF_MPATH", - "RTF_MPLS", - "RTF_MSS", - "RTF_MTU", - "RTF_MULTICAST", - "RTF_NAT", - "RTF_NOFORWARD", - "RTF_NONEXTHOP", - "RTF_NOPMTUDISC", - "RTF_PERMANENT_ARP", - "RTF_PINNED", - "RTF_POLICY", - "RTF_PRCLONING", - "RTF_PROTO1", - "RTF_PROTO2", - "RTF_PROTO3", - "RTF_PROXY", - "RTF_REINSTATE", - "RTF_REJECT", - "RTF_RNH_LOCKED", - "RTF_ROUTER", - "RTF_SOURCE", - "RTF_SRC", - "RTF_STATIC", - "RTF_STICKY", - "RTF_THROW", - "RTF_TUNNEL", - "RTF_UP", - "RTF_USETRAILERS", - "RTF_WASCLONED", - "RTF_WINDOW", - "RTF_XRESOLVE", - "RTM_ADD", - "RTM_BASE", - "RTM_CHANGE", - "RTM_CHGADDR", - "RTM_DELACTION", - "RTM_DELADDR", - "RTM_DELADDRLABEL", - "RTM_DELETE", - "RTM_DELLINK", - "RTM_DELMADDR", - "RTM_DELNEIGH", - "RTM_DELQDISC", - "RTM_DELROUTE", - "RTM_DELRULE", - "RTM_DELTCLASS", - "RTM_DELTFILTER", - "RTM_DESYNC", - "RTM_F_CLONED", - "RTM_F_EQUALIZE", - "RTM_F_NOTIFY", - "RTM_F_PREFIX", - "RTM_GET", - "RTM_GET2", - "RTM_GETACTION", - "RTM_GETADDR", - "RTM_GETADDRLABEL", - "RTM_GETANYCAST", - "RTM_GETDCB", - "RTM_GETLINK", - "RTM_GETMULTICAST", - "RTM_GETNEIGH", - "RTM_GETNEIGHTBL", - "RTM_GETQDISC", - "RTM_GETROUTE", - "RTM_GETRULE", - "RTM_GETTCLASS", - "RTM_GETTFILTER", - "RTM_IEEE80211", - "RTM_IFANNOUNCE", - "RTM_IFINFO", - "RTM_IFINFO2", - "RTM_LLINFO_UPD", - "RTM_LOCK", - "RTM_LOSING", - "RTM_MAX", - "RTM_MAXSIZE", - "RTM_MISS", - "RTM_NEWACTION", - "RTM_NEWADDR", - "RTM_NEWADDRLABEL", - "RTM_NEWLINK", - "RTM_NEWMADDR", - "RTM_NEWMADDR2", - "RTM_NEWNDUSEROPT", - "RTM_NEWNEIGH", - "RTM_NEWNEIGHTBL", - "RTM_NEWPREFIX", - "RTM_NEWQDISC", - "RTM_NEWROUTE", - "RTM_NEWRULE", - "RTM_NEWTCLASS", - "RTM_NEWTFILTER", - "RTM_NR_FAMILIES", - "RTM_NR_MSGTYPES", - "RTM_OIFINFO", - "RTM_OLDADD", - "RTM_OLDDEL", - "RTM_OOIFINFO", - "RTM_REDIRECT", - "RTM_RESOLVE", - "RTM_RTTUNIT", - "RTM_SETDCB", - "RTM_SETGATE", - "RTM_SETLINK", - "RTM_SETNEIGHTBL", - "RTM_VERSION", - "RTNH_ALIGNTO", - "RTNH_F_DEAD", - "RTNH_F_ONLINK", - "RTNH_F_PERVASIVE", - "RTNLGRP_IPV4_IFADDR", - "RTNLGRP_IPV4_MROUTE", - "RTNLGRP_IPV4_ROUTE", - "RTNLGRP_IPV4_RULE", - "RTNLGRP_IPV6_IFADDR", - "RTNLGRP_IPV6_IFINFO", - "RTNLGRP_IPV6_MROUTE", - "RTNLGRP_IPV6_PREFIX", - "RTNLGRP_IPV6_ROUTE", - "RTNLGRP_IPV6_RULE", - "RTNLGRP_LINK", - "RTNLGRP_ND_USEROPT", - "RTNLGRP_NEIGH", - "RTNLGRP_NONE", - "RTNLGRP_NOTIFY", - "RTNLGRP_TC", - "RTN_ANYCAST", - "RTN_BLACKHOLE", - "RTN_BROADCAST", - "RTN_LOCAL", - "RTN_MAX", - "RTN_MULTICAST", - "RTN_NAT", - "RTN_PROHIBIT", - "RTN_THROW", - "RTN_UNICAST", - "RTN_UNREACHABLE", - "RTN_UNSPEC", - "RTN_XRESOLVE", - "RTPROT_BIRD", - "RTPROT_BOOT", - "RTPROT_DHCP", - "RTPROT_DNROUTED", - "RTPROT_GATED", - "RTPROT_KERNEL", - "RTPROT_MRT", - "RTPROT_NTK", - "RTPROT_RA", - "RTPROT_REDIRECT", - "RTPROT_STATIC", - "RTPROT_UNSPEC", - "RTPROT_XORP", - "RTPROT_ZEBRA", - "RTV_EXPIRE", - "RTV_HOPCOUNT", - "RTV_MTU", - "RTV_RPIPE", - "RTV_RTT", - "RTV_RTTVAR", - "RTV_SPIPE", - "RTV_SSTHRESH", - "RTV_WEIGHT", - "RT_CACHING_CONTEXT", - "RT_CLASS_DEFAULT", - "RT_CLASS_LOCAL", - "RT_CLASS_MAIN", - "RT_CLASS_MAX", - "RT_CLASS_UNSPEC", - "RT_DEFAULT_FIB", - "RT_NORTREF", - "RT_SCOPE_HOST", - "RT_SCOPE_LINK", - "RT_SCOPE_NOWHERE", - "RT_SCOPE_SITE", - "RT_SCOPE_UNIVERSE", - 
"RT_TABLEID_MAX", - "RT_TABLE_COMPAT", - "RT_TABLE_DEFAULT", - "RT_TABLE_LOCAL", - "RT_TABLE_MAIN", - "RT_TABLE_MAX", - "RT_TABLE_UNSPEC", - "RUSAGE_CHILDREN", - "RUSAGE_SELF", - "RUSAGE_THREAD", - "Radvisory_t", - "RawConn", - "RawSockaddr", - "RawSockaddrAny", - "RawSockaddrDatalink", - "RawSockaddrInet4", - "RawSockaddrInet6", - "RawSockaddrLinklayer", - "RawSockaddrNetlink", - "RawSockaddrUnix", - "RawSyscall", - "RawSyscall6", - "Read", - "ReadConsole", - "ReadDirectoryChanges", - "ReadDirent", - "ReadFile", - "Readlink", - "Reboot", - "Recvfrom", - "Recvmsg", - "RegCloseKey", - "RegEnumKeyEx", - "RegOpenKeyEx", - "RegQueryInfoKey", - "RegQueryValueEx", - "RemoveDirectory", - "Removexattr", - "Rename", - "Renameat", - "Revoke", - "Rlimit", - "Rmdir", - "RouteMessage", - "RouteRIB", - "RoutingMessage", - "RtAttr", - "RtGenmsg", - "RtMetrics", - "RtMsg", - "RtMsghdr", - "RtNexthop", - "Rusage", - "SCM_BINTIME", - "SCM_CREDENTIALS", - "SCM_CREDS", - "SCM_RIGHTS", - "SCM_TIMESTAMP", - "SCM_TIMESTAMPING", - "SCM_TIMESTAMPNS", - "SCM_TIMESTAMP_MONOTONIC", - "SHUT_RD", - "SHUT_RDWR", - "SHUT_WR", - "SID", - "SIDAndAttributes", - "SIGABRT", - "SIGALRM", - "SIGBUS", - "SIGCHLD", - "SIGCLD", - "SIGCONT", - "SIGEMT", - "SIGFPE", - "SIGHUP", - "SIGILL", - "SIGINFO", - "SIGINT", - "SIGIO", - "SIGIOT", - "SIGKILL", - "SIGLIBRT", - "SIGLWP", - "SIGPIPE", - "SIGPOLL", - "SIGPROF", - "SIGPWR", - "SIGQUIT", - "SIGSEGV", - "SIGSTKFLT", - "SIGSTOP", - "SIGSYS", - "SIGTERM", - "SIGTHR", - "SIGTRAP", - "SIGTSTP", - "SIGTTIN", - "SIGTTOU", - "SIGUNUSED", - "SIGURG", - "SIGUSR1", - "SIGUSR2", - "SIGVTALRM", - "SIGWINCH", - "SIGXCPU", - "SIGXFSZ", - "SIOCADDDLCI", - "SIOCADDMULTI", - "SIOCADDRT", - "SIOCAIFADDR", - "SIOCAIFGROUP", - "SIOCALIFADDR", - "SIOCARPIPLL", - "SIOCATMARK", - "SIOCAUTOADDR", - "SIOCAUTONETMASK", - "SIOCBRDGADD", - "SIOCBRDGADDS", - "SIOCBRDGARL", - "SIOCBRDGDADDR", - "SIOCBRDGDEL", - "SIOCBRDGDELS", - "SIOCBRDGFLUSH", - "SIOCBRDGFRL", - "SIOCBRDGGCACHE", - "SIOCBRDGGFD", - "SIOCBRDGGHT", - "SIOCBRDGGIFFLGS", - "SIOCBRDGGMA", - "SIOCBRDGGPARAM", - "SIOCBRDGGPRI", - "SIOCBRDGGRL", - "SIOCBRDGGSIFS", - "SIOCBRDGGTO", - "SIOCBRDGIFS", - "SIOCBRDGRTS", - "SIOCBRDGSADDR", - "SIOCBRDGSCACHE", - "SIOCBRDGSFD", - "SIOCBRDGSHT", - "SIOCBRDGSIFCOST", - "SIOCBRDGSIFFLGS", - "SIOCBRDGSIFPRIO", - "SIOCBRDGSMA", - "SIOCBRDGSPRI", - "SIOCBRDGSPROTO", - "SIOCBRDGSTO", - "SIOCBRDGSTXHC", - "SIOCDARP", - "SIOCDELDLCI", - "SIOCDELMULTI", - "SIOCDELRT", - "SIOCDEVPRIVATE", - "SIOCDIFADDR", - "SIOCDIFGROUP", - "SIOCDIFPHYADDR", - "SIOCDLIFADDR", - "SIOCDRARP", - "SIOCGARP", - "SIOCGDRVSPEC", - "SIOCGETKALIVE", - "SIOCGETLABEL", - "SIOCGETPFLOW", - "SIOCGETPFSYNC", - "SIOCGETSGCNT", - "SIOCGETVIFCNT", - "SIOCGETVLAN", - "SIOCGHIWAT", - "SIOCGIFADDR", - "SIOCGIFADDRPREF", - "SIOCGIFALIAS", - "SIOCGIFALTMTU", - "SIOCGIFASYNCMAP", - "SIOCGIFBOND", - "SIOCGIFBR", - "SIOCGIFBRDADDR", - "SIOCGIFCAP", - "SIOCGIFCONF", - "SIOCGIFCOUNT", - "SIOCGIFDATA", - "SIOCGIFDESCR", - "SIOCGIFDEVMTU", - "SIOCGIFDLT", - "SIOCGIFDSTADDR", - "SIOCGIFENCAP", - "SIOCGIFFIB", - "SIOCGIFFLAGS", - "SIOCGIFGATTR", - "SIOCGIFGENERIC", - "SIOCGIFGMEMB", - "SIOCGIFGROUP", - "SIOCGIFHARDMTU", - "SIOCGIFHWADDR", - "SIOCGIFINDEX", - "SIOCGIFKPI", - "SIOCGIFMAC", - "SIOCGIFMAP", - "SIOCGIFMEDIA", - "SIOCGIFMEM", - "SIOCGIFMETRIC", - "SIOCGIFMTU", - "SIOCGIFNAME", - "SIOCGIFNETMASK", - "SIOCGIFPDSTADDR", - "SIOCGIFPFLAGS", - "SIOCGIFPHYS", - "SIOCGIFPRIORITY", - "SIOCGIFPSRCADDR", - "SIOCGIFRDOMAIN", - "SIOCGIFRTLABEL", - "SIOCGIFSLAVE", - 
"SIOCGIFSTATUS", - "SIOCGIFTIMESLOT", - "SIOCGIFTXQLEN", - "SIOCGIFVLAN", - "SIOCGIFWAKEFLAGS", - "SIOCGIFXFLAGS", - "SIOCGLIFADDR", - "SIOCGLIFPHYADDR", - "SIOCGLIFPHYRTABLE", - "SIOCGLIFPHYTTL", - "SIOCGLINKSTR", - "SIOCGLOWAT", - "SIOCGPGRP", - "SIOCGPRIVATE_0", - "SIOCGPRIVATE_1", - "SIOCGRARP", - "SIOCGSPPPPARAMS", - "SIOCGSTAMP", - "SIOCGSTAMPNS", - "SIOCGVH", - "SIOCGVNETID", - "SIOCIFCREATE", - "SIOCIFCREATE2", - "SIOCIFDESTROY", - "SIOCIFGCLONERS", - "SIOCINITIFADDR", - "SIOCPROTOPRIVATE", - "SIOCRSLVMULTI", - "SIOCRTMSG", - "SIOCSARP", - "SIOCSDRVSPEC", - "SIOCSETKALIVE", - "SIOCSETLABEL", - "SIOCSETPFLOW", - "SIOCSETPFSYNC", - "SIOCSETVLAN", - "SIOCSHIWAT", - "SIOCSIFADDR", - "SIOCSIFADDRPREF", - "SIOCSIFALTMTU", - "SIOCSIFASYNCMAP", - "SIOCSIFBOND", - "SIOCSIFBR", - "SIOCSIFBRDADDR", - "SIOCSIFCAP", - "SIOCSIFDESCR", - "SIOCSIFDSTADDR", - "SIOCSIFENCAP", - "SIOCSIFFIB", - "SIOCSIFFLAGS", - "SIOCSIFGATTR", - "SIOCSIFGENERIC", - "SIOCSIFHWADDR", - "SIOCSIFHWBROADCAST", - "SIOCSIFKPI", - "SIOCSIFLINK", - "SIOCSIFLLADDR", - "SIOCSIFMAC", - "SIOCSIFMAP", - "SIOCSIFMEDIA", - "SIOCSIFMEM", - "SIOCSIFMETRIC", - "SIOCSIFMTU", - "SIOCSIFNAME", - "SIOCSIFNETMASK", - "SIOCSIFPFLAGS", - "SIOCSIFPHYADDR", - "SIOCSIFPHYS", - "SIOCSIFPRIORITY", - "SIOCSIFRDOMAIN", - "SIOCSIFRTLABEL", - "SIOCSIFRVNET", - "SIOCSIFSLAVE", - "SIOCSIFTIMESLOT", - "SIOCSIFTXQLEN", - "SIOCSIFVLAN", - "SIOCSIFVNET", - "SIOCSIFXFLAGS", - "SIOCSLIFPHYADDR", - "SIOCSLIFPHYRTABLE", - "SIOCSLIFPHYTTL", - "SIOCSLINKSTR", - "SIOCSLOWAT", - "SIOCSPGRP", - "SIOCSRARP", - "SIOCSSPPPPARAMS", - "SIOCSVH", - "SIOCSVNETID", - "SIOCZIFDATA", - "SIO_GET_EXTENSION_FUNCTION_POINTER", - "SIO_GET_INTERFACE_LIST", - "SIO_KEEPALIVE_VALS", - "SIO_UDP_CONNRESET", - "SOCK_CLOEXEC", - "SOCK_DCCP", - "SOCK_DGRAM", - "SOCK_FLAGS_MASK", - "SOCK_MAXADDRLEN", - "SOCK_NONBLOCK", - "SOCK_NOSIGPIPE", - "SOCK_PACKET", - "SOCK_RAW", - "SOCK_RDM", - "SOCK_SEQPACKET", - "SOCK_STREAM", - "SOL_AAL", - "SOL_ATM", - "SOL_DECNET", - "SOL_ICMPV6", - "SOL_IP", - "SOL_IPV6", - "SOL_IRDA", - "SOL_PACKET", - "SOL_RAW", - "SOL_SOCKET", - "SOL_TCP", - "SOL_X25", - "SOMAXCONN", - "SO_ACCEPTCONN", - "SO_ACCEPTFILTER", - "SO_ATTACH_FILTER", - "SO_BINDANY", - "SO_BINDTODEVICE", - "SO_BINTIME", - "SO_BROADCAST", - "SO_BSDCOMPAT", - "SO_DEBUG", - "SO_DETACH_FILTER", - "SO_DOMAIN", - "SO_DONTROUTE", - "SO_DONTTRUNC", - "SO_ERROR", - "SO_KEEPALIVE", - "SO_LABEL", - "SO_LINGER", - "SO_LINGER_SEC", - "SO_LISTENINCQLEN", - "SO_LISTENQLEN", - "SO_LISTENQLIMIT", - "SO_MARK", - "SO_NETPROC", - "SO_NKE", - "SO_NOADDRERR", - "SO_NOHEADER", - "SO_NOSIGPIPE", - "SO_NOTIFYCONFLICT", - "SO_NO_CHECK", - "SO_NO_DDP", - "SO_NO_OFFLOAD", - "SO_NP_EXTENSIONS", - "SO_NREAD", - "SO_NUMRCVPKT", - "SO_NWRITE", - "SO_OOBINLINE", - "SO_OVERFLOWED", - "SO_PASSCRED", - "SO_PASSSEC", - "SO_PEERCRED", - "SO_PEERLABEL", - "SO_PEERNAME", - "SO_PEERSEC", - "SO_PRIORITY", - "SO_PROTOCOL", - "SO_PROTOTYPE", - "SO_RANDOMPORT", - "SO_RCVBUF", - "SO_RCVBUFFORCE", - "SO_RCVLOWAT", - "SO_RCVTIMEO", - "SO_RESTRICTIONS", - "SO_RESTRICT_DENYIN", - "SO_RESTRICT_DENYOUT", - "SO_RESTRICT_DENYSET", - "SO_REUSEADDR", - "SO_REUSEPORT", - "SO_REUSESHAREUID", - "SO_RTABLE", - "SO_RXQ_OVFL", - "SO_SECURITY_AUTHENTICATION", - "SO_SECURITY_ENCRYPTION_NETWORK", - "SO_SECURITY_ENCRYPTION_TRANSPORT", - "SO_SETFIB", - "SO_SNDBUF", - "SO_SNDBUFFORCE", - "SO_SNDLOWAT", - "SO_SNDTIMEO", - "SO_SPLICE", - "SO_TIMESTAMP", - "SO_TIMESTAMPING", - "SO_TIMESTAMPNS", - "SO_TIMESTAMP_MONOTONIC", - "SO_TYPE", - "SO_UPCALLCLOSEWAIT", - 
"SO_UPDATE_ACCEPT_CONTEXT", - "SO_UPDATE_CONNECT_CONTEXT", - "SO_USELOOPBACK", - "SO_USER_COOKIE", - "SO_VENDOR", - "SO_WANTMORE", - "SO_WANTOOBFLAG", - "SSLExtraCertChainPolicyPara", - "STANDARD_RIGHTS_ALL", - "STANDARD_RIGHTS_EXECUTE", - "STANDARD_RIGHTS_READ", - "STANDARD_RIGHTS_REQUIRED", - "STANDARD_RIGHTS_WRITE", - "STARTF_USESHOWWINDOW", - "STARTF_USESTDHANDLES", - "STD_ERROR_HANDLE", - "STD_INPUT_HANDLE", - "STD_OUTPUT_HANDLE", - "SUBLANG_ENGLISH_US", - "SW_FORCEMINIMIZE", - "SW_HIDE", - "SW_MAXIMIZE", - "SW_MINIMIZE", - "SW_NORMAL", - "SW_RESTORE", - "SW_SHOW", - "SW_SHOWDEFAULT", - "SW_SHOWMAXIMIZED", - "SW_SHOWMINIMIZED", - "SW_SHOWMINNOACTIVE", - "SW_SHOWNA", - "SW_SHOWNOACTIVATE", - "SW_SHOWNORMAL", - "SYMBOLIC_LINK_FLAG_DIRECTORY", - "SYNCHRONIZE", - "SYSCTL_VERSION", - "SYSCTL_VERS_0", - "SYSCTL_VERS_1", - "SYSCTL_VERS_MASK", - "SYS_ABORT2", - "SYS_ACCEPT", - "SYS_ACCEPT4", - "SYS_ACCEPT_NOCANCEL", - "SYS_ACCESS", - "SYS_ACCESS_EXTENDED", - "SYS_ACCT", - "SYS_ADD_KEY", - "SYS_ADD_PROFIL", - "SYS_ADJFREQ", - "SYS_ADJTIME", - "SYS_ADJTIMEX", - "SYS_AFS_SYSCALL", - "SYS_AIO_CANCEL", - "SYS_AIO_ERROR", - "SYS_AIO_FSYNC", - "SYS_AIO_MLOCK", - "SYS_AIO_READ", - "SYS_AIO_RETURN", - "SYS_AIO_SUSPEND", - "SYS_AIO_SUSPEND_NOCANCEL", - "SYS_AIO_WAITCOMPLETE", - "SYS_AIO_WRITE", - "SYS_ALARM", - "SYS_ARCH_PRCTL", - "SYS_ARM_FADVISE64_64", - "SYS_ARM_SYNC_FILE_RANGE", - "SYS_ATGETMSG", - "SYS_ATPGETREQ", - "SYS_ATPGETRSP", - "SYS_ATPSNDREQ", - "SYS_ATPSNDRSP", - "SYS_ATPUTMSG", - "SYS_ATSOCKET", - "SYS_AUDIT", - "SYS_AUDITCTL", - "SYS_AUDITON", - "SYS_AUDIT_SESSION_JOIN", - "SYS_AUDIT_SESSION_PORT", - "SYS_AUDIT_SESSION_SELF", - "SYS_BDFLUSH", - "SYS_BIND", - "SYS_BINDAT", - "SYS_BREAK", - "SYS_BRK", - "SYS_BSDTHREAD_CREATE", - "SYS_BSDTHREAD_REGISTER", - "SYS_BSDTHREAD_TERMINATE", - "SYS_CAPGET", - "SYS_CAPSET", - "SYS_CAP_ENTER", - "SYS_CAP_FCNTLS_GET", - "SYS_CAP_FCNTLS_LIMIT", - "SYS_CAP_GETMODE", - "SYS_CAP_GETRIGHTS", - "SYS_CAP_IOCTLS_GET", - "SYS_CAP_IOCTLS_LIMIT", - "SYS_CAP_NEW", - "SYS_CAP_RIGHTS_GET", - "SYS_CAP_RIGHTS_LIMIT", - "SYS_CHDIR", - "SYS_CHFLAGS", - "SYS_CHFLAGSAT", - "SYS_CHMOD", - "SYS_CHMOD_EXTENDED", - "SYS_CHOWN", - "SYS_CHOWN32", - "SYS_CHROOT", - "SYS_CHUD", - "SYS_CLOCK_ADJTIME", - "SYS_CLOCK_GETCPUCLOCKID2", - "SYS_CLOCK_GETRES", - "SYS_CLOCK_GETTIME", - "SYS_CLOCK_NANOSLEEP", - "SYS_CLOCK_SETTIME", - "SYS_CLONE", - "SYS_CLOSE", - "SYS_CLOSEFROM", - "SYS_CLOSE_NOCANCEL", - "SYS_CONNECT", - "SYS_CONNECTAT", - "SYS_CONNECT_NOCANCEL", - "SYS_COPYFILE", - "SYS_CPUSET", - "SYS_CPUSET_GETAFFINITY", - "SYS_CPUSET_GETID", - "SYS_CPUSET_SETAFFINITY", - "SYS_CPUSET_SETID", - "SYS_CREAT", - "SYS_CREATE_MODULE", - "SYS_CSOPS", - "SYS_CSOPS_AUDITTOKEN", - "SYS_DELETE", - "SYS_DELETE_MODULE", - "SYS_DUP", - "SYS_DUP2", - "SYS_DUP3", - "SYS_EACCESS", - "SYS_EPOLL_CREATE", - "SYS_EPOLL_CREATE1", - "SYS_EPOLL_CTL", - "SYS_EPOLL_CTL_OLD", - "SYS_EPOLL_PWAIT", - "SYS_EPOLL_WAIT", - "SYS_EPOLL_WAIT_OLD", - "SYS_EVENTFD", - "SYS_EVENTFD2", - "SYS_EXCHANGEDATA", - "SYS_EXECVE", - "SYS_EXIT", - "SYS_EXIT_GROUP", - "SYS_EXTATTRCTL", - "SYS_EXTATTR_DELETE_FD", - "SYS_EXTATTR_DELETE_FILE", - "SYS_EXTATTR_DELETE_LINK", - "SYS_EXTATTR_GET_FD", - "SYS_EXTATTR_GET_FILE", - "SYS_EXTATTR_GET_LINK", - "SYS_EXTATTR_LIST_FD", - "SYS_EXTATTR_LIST_FILE", - "SYS_EXTATTR_LIST_LINK", - "SYS_EXTATTR_SET_FD", - "SYS_EXTATTR_SET_FILE", - "SYS_EXTATTR_SET_LINK", - "SYS_FACCESSAT", - "SYS_FADVISE64", - "SYS_FADVISE64_64", - "SYS_FALLOCATE", - "SYS_FANOTIFY_INIT", - "SYS_FANOTIFY_MARK", - 
"SYS_FCHDIR", - "SYS_FCHFLAGS", - "SYS_FCHMOD", - "SYS_FCHMODAT", - "SYS_FCHMOD_EXTENDED", - "SYS_FCHOWN", - "SYS_FCHOWN32", - "SYS_FCHOWNAT", - "SYS_FCHROOT", - "SYS_FCNTL", - "SYS_FCNTL64", - "SYS_FCNTL_NOCANCEL", - "SYS_FDATASYNC", - "SYS_FEXECVE", - "SYS_FFCLOCK_GETCOUNTER", - "SYS_FFCLOCK_GETESTIMATE", - "SYS_FFCLOCK_SETESTIMATE", - "SYS_FFSCTL", - "SYS_FGETATTRLIST", - "SYS_FGETXATTR", - "SYS_FHOPEN", - "SYS_FHSTAT", - "SYS_FHSTATFS", - "SYS_FILEPORT_MAKEFD", - "SYS_FILEPORT_MAKEPORT", - "SYS_FKTRACE", - "SYS_FLISTXATTR", - "SYS_FLOCK", - "SYS_FORK", - "SYS_FPATHCONF", - "SYS_FREEBSD6_FTRUNCATE", - "SYS_FREEBSD6_LSEEK", - "SYS_FREEBSD6_MMAP", - "SYS_FREEBSD6_PREAD", - "SYS_FREEBSD6_PWRITE", - "SYS_FREEBSD6_TRUNCATE", - "SYS_FREMOVEXATTR", - "SYS_FSCTL", - "SYS_FSETATTRLIST", - "SYS_FSETXATTR", - "SYS_FSGETPATH", - "SYS_FSTAT", - "SYS_FSTAT64", - "SYS_FSTAT64_EXTENDED", - "SYS_FSTATAT", - "SYS_FSTATAT64", - "SYS_FSTATFS", - "SYS_FSTATFS64", - "SYS_FSTATV", - "SYS_FSTATVFS1", - "SYS_FSTAT_EXTENDED", - "SYS_FSYNC", - "SYS_FSYNC_NOCANCEL", - "SYS_FSYNC_RANGE", - "SYS_FTIME", - "SYS_FTRUNCATE", - "SYS_FTRUNCATE64", - "SYS_FUTEX", - "SYS_FUTIMENS", - "SYS_FUTIMES", - "SYS_FUTIMESAT", - "SYS_GETATTRLIST", - "SYS_GETAUDIT", - "SYS_GETAUDIT_ADDR", - "SYS_GETAUID", - "SYS_GETCONTEXT", - "SYS_GETCPU", - "SYS_GETCWD", - "SYS_GETDENTS", - "SYS_GETDENTS64", - "SYS_GETDIRENTRIES", - "SYS_GETDIRENTRIES64", - "SYS_GETDIRENTRIESATTR", - "SYS_GETDTABLECOUNT", - "SYS_GETDTABLESIZE", - "SYS_GETEGID", - "SYS_GETEGID32", - "SYS_GETEUID", - "SYS_GETEUID32", - "SYS_GETFH", - "SYS_GETFSSTAT", - "SYS_GETFSSTAT64", - "SYS_GETGID", - "SYS_GETGID32", - "SYS_GETGROUPS", - "SYS_GETGROUPS32", - "SYS_GETHOSTUUID", - "SYS_GETITIMER", - "SYS_GETLCID", - "SYS_GETLOGIN", - "SYS_GETLOGINCLASS", - "SYS_GETPEERNAME", - "SYS_GETPGID", - "SYS_GETPGRP", - "SYS_GETPID", - "SYS_GETPMSG", - "SYS_GETPPID", - "SYS_GETPRIORITY", - "SYS_GETRESGID", - "SYS_GETRESGID32", - "SYS_GETRESUID", - "SYS_GETRESUID32", - "SYS_GETRLIMIT", - "SYS_GETRTABLE", - "SYS_GETRUSAGE", - "SYS_GETSGROUPS", - "SYS_GETSID", - "SYS_GETSOCKNAME", - "SYS_GETSOCKOPT", - "SYS_GETTHRID", - "SYS_GETTID", - "SYS_GETTIMEOFDAY", - "SYS_GETUID", - "SYS_GETUID32", - "SYS_GETVFSSTAT", - "SYS_GETWGROUPS", - "SYS_GETXATTR", - "SYS_GET_KERNEL_SYMS", - "SYS_GET_MEMPOLICY", - "SYS_GET_ROBUST_LIST", - "SYS_GET_THREAD_AREA", - "SYS_GSSD_SYSCALL", - "SYS_GTTY", - "SYS_IDENTITYSVC", - "SYS_IDLE", - "SYS_INITGROUPS", - "SYS_INIT_MODULE", - "SYS_INOTIFY_ADD_WATCH", - "SYS_INOTIFY_INIT", - "SYS_INOTIFY_INIT1", - "SYS_INOTIFY_RM_WATCH", - "SYS_IOCTL", - "SYS_IOPERM", - "SYS_IOPL", - "SYS_IOPOLICYSYS", - "SYS_IOPRIO_GET", - "SYS_IOPRIO_SET", - "SYS_IO_CANCEL", - "SYS_IO_DESTROY", - "SYS_IO_GETEVENTS", - "SYS_IO_SETUP", - "SYS_IO_SUBMIT", - "SYS_IPC", - "SYS_ISSETUGID", - "SYS_JAIL", - "SYS_JAIL_ATTACH", - "SYS_JAIL_GET", - "SYS_JAIL_REMOVE", - "SYS_JAIL_SET", - "SYS_KAS_INFO", - "SYS_KDEBUG_TRACE", - "SYS_KENV", - "SYS_KEVENT", - "SYS_KEVENT64", - "SYS_KEXEC_LOAD", - "SYS_KEYCTL", - "SYS_KILL", - "SYS_KLDFIND", - "SYS_KLDFIRSTMOD", - "SYS_KLDLOAD", - "SYS_KLDNEXT", - "SYS_KLDSTAT", - "SYS_KLDSYM", - "SYS_KLDUNLOAD", - "SYS_KLDUNLOADF", - "SYS_KMQ_NOTIFY", - "SYS_KMQ_OPEN", - "SYS_KMQ_SETATTR", - "SYS_KMQ_TIMEDRECEIVE", - "SYS_KMQ_TIMEDSEND", - "SYS_KMQ_UNLINK", - "SYS_KQUEUE", - "SYS_KQUEUE1", - "SYS_KSEM_CLOSE", - "SYS_KSEM_DESTROY", - "SYS_KSEM_GETVALUE", - "SYS_KSEM_INIT", - "SYS_KSEM_OPEN", - "SYS_KSEM_POST", - "SYS_KSEM_TIMEDWAIT", - "SYS_KSEM_TRYWAIT", - "SYS_KSEM_UNLINK", - 
"SYS_KSEM_WAIT", - "SYS_KTIMER_CREATE", - "SYS_KTIMER_DELETE", - "SYS_KTIMER_GETOVERRUN", - "SYS_KTIMER_GETTIME", - "SYS_KTIMER_SETTIME", - "SYS_KTRACE", - "SYS_LCHFLAGS", - "SYS_LCHMOD", - "SYS_LCHOWN", - "SYS_LCHOWN32", - "SYS_LEDGER", - "SYS_LGETFH", - "SYS_LGETXATTR", - "SYS_LINK", - "SYS_LINKAT", - "SYS_LIO_LISTIO", - "SYS_LISTEN", - "SYS_LISTXATTR", - "SYS_LLISTXATTR", - "SYS_LOCK", - "SYS_LOOKUP_DCOOKIE", - "SYS_LPATHCONF", - "SYS_LREMOVEXATTR", - "SYS_LSEEK", - "SYS_LSETXATTR", - "SYS_LSTAT", - "SYS_LSTAT64", - "SYS_LSTAT64_EXTENDED", - "SYS_LSTATV", - "SYS_LSTAT_EXTENDED", - "SYS_LUTIMES", - "SYS_MAC_SYSCALL", - "SYS_MADVISE", - "SYS_MADVISE1", - "SYS_MAXSYSCALL", - "SYS_MBIND", - "SYS_MIGRATE_PAGES", - "SYS_MINCORE", - "SYS_MINHERIT", - "SYS_MKCOMPLEX", - "SYS_MKDIR", - "SYS_MKDIRAT", - "SYS_MKDIR_EXTENDED", - "SYS_MKFIFO", - "SYS_MKFIFOAT", - "SYS_MKFIFO_EXTENDED", - "SYS_MKNOD", - "SYS_MKNODAT", - "SYS_MLOCK", - "SYS_MLOCKALL", - "SYS_MMAP", - "SYS_MMAP2", - "SYS_MODCTL", - "SYS_MODFIND", - "SYS_MODFNEXT", - "SYS_MODIFY_LDT", - "SYS_MODNEXT", - "SYS_MODSTAT", - "SYS_MODWATCH", - "SYS_MOUNT", - "SYS_MOVE_PAGES", - "SYS_MPROTECT", - "SYS_MPX", - "SYS_MQUERY", - "SYS_MQ_GETSETATTR", - "SYS_MQ_NOTIFY", - "SYS_MQ_OPEN", - "SYS_MQ_TIMEDRECEIVE", - "SYS_MQ_TIMEDSEND", - "SYS_MQ_UNLINK", - "SYS_MREMAP", - "SYS_MSGCTL", - "SYS_MSGGET", - "SYS_MSGRCV", - "SYS_MSGRCV_NOCANCEL", - "SYS_MSGSND", - "SYS_MSGSND_NOCANCEL", - "SYS_MSGSYS", - "SYS_MSYNC", - "SYS_MSYNC_NOCANCEL", - "SYS_MUNLOCK", - "SYS_MUNLOCKALL", - "SYS_MUNMAP", - "SYS_NAME_TO_HANDLE_AT", - "SYS_NANOSLEEP", - "SYS_NEWFSTATAT", - "SYS_NFSCLNT", - "SYS_NFSSERVCTL", - "SYS_NFSSVC", - "SYS_NFSTAT", - "SYS_NICE", - "SYS_NLM_SYSCALL", - "SYS_NLSTAT", - "SYS_NMOUNT", - "SYS_NSTAT", - "SYS_NTP_ADJTIME", - "SYS_NTP_GETTIME", - "SYS_NUMA_GETAFFINITY", - "SYS_NUMA_SETAFFINITY", - "SYS_OABI_SYSCALL_BASE", - "SYS_OBREAK", - "SYS_OLDFSTAT", - "SYS_OLDLSTAT", - "SYS_OLDOLDUNAME", - "SYS_OLDSTAT", - "SYS_OLDUNAME", - "SYS_OPEN", - "SYS_OPENAT", - "SYS_OPENBSD_POLL", - "SYS_OPEN_BY_HANDLE_AT", - "SYS_OPEN_DPROTECTED_NP", - "SYS_OPEN_EXTENDED", - "SYS_OPEN_NOCANCEL", - "SYS_OVADVISE", - "SYS_PACCEPT", - "SYS_PATHCONF", - "SYS_PAUSE", - "SYS_PCICONFIG_IOBASE", - "SYS_PCICONFIG_READ", - "SYS_PCICONFIG_WRITE", - "SYS_PDFORK", - "SYS_PDGETPID", - "SYS_PDKILL", - "SYS_PERF_EVENT_OPEN", - "SYS_PERSONALITY", - "SYS_PID_HIBERNATE", - "SYS_PID_RESUME", - "SYS_PID_SHUTDOWN_SOCKETS", - "SYS_PID_SUSPEND", - "SYS_PIPE", - "SYS_PIPE2", - "SYS_PIVOT_ROOT", - "SYS_PMC_CONTROL", - "SYS_PMC_GET_INFO", - "SYS_POLL", - "SYS_POLLTS", - "SYS_POLL_NOCANCEL", - "SYS_POSIX_FADVISE", - "SYS_POSIX_FALLOCATE", - "SYS_POSIX_OPENPT", - "SYS_POSIX_SPAWN", - "SYS_PPOLL", - "SYS_PRCTL", - "SYS_PREAD", - "SYS_PREAD64", - "SYS_PREADV", - "SYS_PREAD_NOCANCEL", - "SYS_PRLIMIT64", - "SYS_PROCCTL", - "SYS_PROCESS_POLICY", - "SYS_PROCESS_VM_READV", - "SYS_PROCESS_VM_WRITEV", - "SYS_PROC_INFO", - "SYS_PROF", - "SYS_PROFIL", - "SYS_PSELECT", - "SYS_PSELECT6", - "SYS_PSET_ASSIGN", - "SYS_PSET_CREATE", - "SYS_PSET_DESTROY", - "SYS_PSYNCH_CVBROAD", - "SYS_PSYNCH_CVCLRPREPOST", - "SYS_PSYNCH_CVSIGNAL", - "SYS_PSYNCH_CVWAIT", - "SYS_PSYNCH_MUTEXDROP", - "SYS_PSYNCH_MUTEXWAIT", - "SYS_PSYNCH_RW_DOWNGRADE", - "SYS_PSYNCH_RW_LONGRDLOCK", - "SYS_PSYNCH_RW_RDLOCK", - "SYS_PSYNCH_RW_UNLOCK", - "SYS_PSYNCH_RW_UNLOCK2", - "SYS_PSYNCH_RW_UPGRADE", - "SYS_PSYNCH_RW_WRLOCK", - "SYS_PSYNCH_RW_YIELDWRLOCK", - "SYS_PTRACE", - "SYS_PUTPMSG", - "SYS_PWRITE", - "SYS_PWRITE64", - "SYS_PWRITEV", - 
"SYS_PWRITE_NOCANCEL", - "SYS_QUERY_MODULE", - "SYS_QUOTACTL", - "SYS_RASCTL", - "SYS_RCTL_ADD_RULE", - "SYS_RCTL_GET_LIMITS", - "SYS_RCTL_GET_RACCT", - "SYS_RCTL_GET_RULES", - "SYS_RCTL_REMOVE_RULE", - "SYS_READ", - "SYS_READAHEAD", - "SYS_READDIR", - "SYS_READLINK", - "SYS_READLINKAT", - "SYS_READV", - "SYS_READV_NOCANCEL", - "SYS_READ_NOCANCEL", - "SYS_REBOOT", - "SYS_RECV", - "SYS_RECVFROM", - "SYS_RECVFROM_NOCANCEL", - "SYS_RECVMMSG", - "SYS_RECVMSG", - "SYS_RECVMSG_NOCANCEL", - "SYS_REMAP_FILE_PAGES", - "SYS_REMOVEXATTR", - "SYS_RENAME", - "SYS_RENAMEAT", - "SYS_REQUEST_KEY", - "SYS_RESTART_SYSCALL", - "SYS_REVOKE", - "SYS_RFORK", - "SYS_RMDIR", - "SYS_RTPRIO", - "SYS_RTPRIO_THREAD", - "SYS_RT_SIGACTION", - "SYS_RT_SIGPENDING", - "SYS_RT_SIGPROCMASK", - "SYS_RT_SIGQUEUEINFO", - "SYS_RT_SIGRETURN", - "SYS_RT_SIGSUSPEND", - "SYS_RT_SIGTIMEDWAIT", - "SYS_RT_TGSIGQUEUEINFO", - "SYS_SBRK", - "SYS_SCHED_GETAFFINITY", - "SYS_SCHED_GETPARAM", - "SYS_SCHED_GETSCHEDULER", - "SYS_SCHED_GET_PRIORITY_MAX", - "SYS_SCHED_GET_PRIORITY_MIN", - "SYS_SCHED_RR_GET_INTERVAL", - "SYS_SCHED_SETAFFINITY", - "SYS_SCHED_SETPARAM", - "SYS_SCHED_SETSCHEDULER", - "SYS_SCHED_YIELD", - "SYS_SCTP_GENERIC_RECVMSG", - "SYS_SCTP_GENERIC_SENDMSG", - "SYS_SCTP_GENERIC_SENDMSG_IOV", - "SYS_SCTP_PEELOFF", - "SYS_SEARCHFS", - "SYS_SECURITY", - "SYS_SELECT", - "SYS_SELECT_NOCANCEL", - "SYS_SEMCONFIG", - "SYS_SEMCTL", - "SYS_SEMGET", - "SYS_SEMOP", - "SYS_SEMSYS", - "SYS_SEMTIMEDOP", - "SYS_SEM_CLOSE", - "SYS_SEM_DESTROY", - "SYS_SEM_GETVALUE", - "SYS_SEM_INIT", - "SYS_SEM_OPEN", - "SYS_SEM_POST", - "SYS_SEM_TRYWAIT", - "SYS_SEM_UNLINK", - "SYS_SEM_WAIT", - "SYS_SEM_WAIT_NOCANCEL", - "SYS_SEND", - "SYS_SENDFILE", - "SYS_SENDFILE64", - "SYS_SENDMMSG", - "SYS_SENDMSG", - "SYS_SENDMSG_NOCANCEL", - "SYS_SENDTO", - "SYS_SENDTO_NOCANCEL", - "SYS_SETATTRLIST", - "SYS_SETAUDIT", - "SYS_SETAUDIT_ADDR", - "SYS_SETAUID", - "SYS_SETCONTEXT", - "SYS_SETDOMAINNAME", - "SYS_SETEGID", - "SYS_SETEUID", - "SYS_SETFIB", - "SYS_SETFSGID", - "SYS_SETFSGID32", - "SYS_SETFSUID", - "SYS_SETFSUID32", - "SYS_SETGID", - "SYS_SETGID32", - "SYS_SETGROUPS", - "SYS_SETGROUPS32", - "SYS_SETHOSTNAME", - "SYS_SETITIMER", - "SYS_SETLCID", - "SYS_SETLOGIN", - "SYS_SETLOGINCLASS", - "SYS_SETNS", - "SYS_SETPGID", - "SYS_SETPRIORITY", - "SYS_SETPRIVEXEC", - "SYS_SETREGID", - "SYS_SETREGID32", - "SYS_SETRESGID", - "SYS_SETRESGID32", - "SYS_SETRESUID", - "SYS_SETRESUID32", - "SYS_SETREUID", - "SYS_SETREUID32", - "SYS_SETRLIMIT", - "SYS_SETRTABLE", - "SYS_SETSGROUPS", - "SYS_SETSID", - "SYS_SETSOCKOPT", - "SYS_SETTID", - "SYS_SETTID_WITH_PID", - "SYS_SETTIMEOFDAY", - "SYS_SETUID", - "SYS_SETUID32", - "SYS_SETWGROUPS", - "SYS_SETXATTR", - "SYS_SET_MEMPOLICY", - "SYS_SET_ROBUST_LIST", - "SYS_SET_THREAD_AREA", - "SYS_SET_TID_ADDRESS", - "SYS_SGETMASK", - "SYS_SHARED_REGION_CHECK_NP", - "SYS_SHARED_REGION_MAP_AND_SLIDE_NP", - "SYS_SHMAT", - "SYS_SHMCTL", - "SYS_SHMDT", - "SYS_SHMGET", - "SYS_SHMSYS", - "SYS_SHM_OPEN", - "SYS_SHM_UNLINK", - "SYS_SHUTDOWN", - "SYS_SIGACTION", - "SYS_SIGALTSTACK", - "SYS_SIGNAL", - "SYS_SIGNALFD", - "SYS_SIGNALFD4", - "SYS_SIGPENDING", - "SYS_SIGPROCMASK", - "SYS_SIGQUEUE", - "SYS_SIGQUEUEINFO", - "SYS_SIGRETURN", - "SYS_SIGSUSPEND", - "SYS_SIGSUSPEND_NOCANCEL", - "SYS_SIGTIMEDWAIT", - "SYS_SIGWAIT", - "SYS_SIGWAITINFO", - "SYS_SOCKET", - "SYS_SOCKETCALL", - "SYS_SOCKETPAIR", - "SYS_SPLICE", - "SYS_SSETMASK", - "SYS_SSTK", - "SYS_STACK_SNAPSHOT", - "SYS_STAT", - "SYS_STAT64", - "SYS_STAT64_EXTENDED", - "SYS_STATFS", - "SYS_STATFS64", - 
"SYS_STATV", - "SYS_STATVFS1", - "SYS_STAT_EXTENDED", - "SYS_STIME", - "SYS_STTY", - "SYS_SWAPCONTEXT", - "SYS_SWAPCTL", - "SYS_SWAPOFF", - "SYS_SWAPON", - "SYS_SYMLINK", - "SYS_SYMLINKAT", - "SYS_SYNC", - "SYS_SYNCFS", - "SYS_SYNC_FILE_RANGE", - "SYS_SYSARCH", - "SYS_SYSCALL", - "SYS_SYSCALL_BASE", - "SYS_SYSFS", - "SYS_SYSINFO", - "SYS_SYSLOG", - "SYS_TEE", - "SYS_TGKILL", - "SYS_THREAD_SELFID", - "SYS_THR_CREATE", - "SYS_THR_EXIT", - "SYS_THR_KILL", - "SYS_THR_KILL2", - "SYS_THR_NEW", - "SYS_THR_SELF", - "SYS_THR_SET_NAME", - "SYS_THR_SUSPEND", - "SYS_THR_WAKE", - "SYS_TIME", - "SYS_TIMERFD_CREATE", - "SYS_TIMERFD_GETTIME", - "SYS_TIMERFD_SETTIME", - "SYS_TIMER_CREATE", - "SYS_TIMER_DELETE", - "SYS_TIMER_GETOVERRUN", - "SYS_TIMER_GETTIME", - "SYS_TIMER_SETTIME", - "SYS_TIMES", - "SYS_TKILL", - "SYS_TRUNCATE", - "SYS_TRUNCATE64", - "SYS_TUXCALL", - "SYS_UGETRLIMIT", - "SYS_ULIMIT", - "SYS_UMASK", - "SYS_UMASK_EXTENDED", - "SYS_UMOUNT", - "SYS_UMOUNT2", - "SYS_UNAME", - "SYS_UNDELETE", - "SYS_UNLINK", - "SYS_UNLINKAT", - "SYS_UNMOUNT", - "SYS_UNSHARE", - "SYS_USELIB", - "SYS_USTAT", - "SYS_UTIME", - "SYS_UTIMENSAT", - "SYS_UTIMES", - "SYS_UTRACE", - "SYS_UUIDGEN", - "SYS_VADVISE", - "SYS_VFORK", - "SYS_VHANGUP", - "SYS_VM86", - "SYS_VM86OLD", - "SYS_VMSPLICE", - "SYS_VM_PRESSURE_MONITOR", - "SYS_VSERVER", - "SYS_WAIT4", - "SYS_WAIT4_NOCANCEL", - "SYS_WAIT6", - "SYS_WAITEVENT", - "SYS_WAITID", - "SYS_WAITID_NOCANCEL", - "SYS_WAITPID", - "SYS_WATCHEVENT", - "SYS_WORKQ_KERNRETURN", - "SYS_WORKQ_OPEN", - "SYS_WRITE", - "SYS_WRITEV", - "SYS_WRITEV_NOCANCEL", - "SYS_WRITE_NOCANCEL", - "SYS_YIELD", - "SYS__LLSEEK", - "SYS__LWP_CONTINUE", - "SYS__LWP_CREATE", - "SYS__LWP_CTL", - "SYS__LWP_DETACH", - "SYS__LWP_EXIT", - "SYS__LWP_GETNAME", - "SYS__LWP_GETPRIVATE", - "SYS__LWP_KILL", - "SYS__LWP_PARK", - "SYS__LWP_SELF", - "SYS__LWP_SETNAME", - "SYS__LWP_SETPRIVATE", - "SYS__LWP_SUSPEND", - "SYS__LWP_UNPARK", - "SYS__LWP_UNPARK_ALL", - "SYS__LWP_WAIT", - "SYS__LWP_WAKEUP", - "SYS__NEWSELECT", - "SYS__PSET_BIND", - "SYS__SCHED_GETAFFINITY", - "SYS__SCHED_GETPARAM", - "SYS__SCHED_SETAFFINITY", - "SYS__SCHED_SETPARAM", - "SYS__SYSCTL", - "SYS__UMTX_LOCK", - "SYS__UMTX_OP", - "SYS__UMTX_UNLOCK", - "SYS___ACL_ACLCHECK_FD", - "SYS___ACL_ACLCHECK_FILE", - "SYS___ACL_ACLCHECK_LINK", - "SYS___ACL_DELETE_FD", - "SYS___ACL_DELETE_FILE", - "SYS___ACL_DELETE_LINK", - "SYS___ACL_GET_FD", - "SYS___ACL_GET_FILE", - "SYS___ACL_GET_LINK", - "SYS___ACL_SET_FD", - "SYS___ACL_SET_FILE", - "SYS___ACL_SET_LINK", - "SYS___CAP_RIGHTS_GET", - "SYS___CLONE", - "SYS___DISABLE_THREADSIGNAL", - "SYS___GETCWD", - "SYS___GETLOGIN", - "SYS___GET_TCB", - "SYS___MAC_EXECVE", - "SYS___MAC_GETFSSTAT", - "SYS___MAC_GET_FD", - "SYS___MAC_GET_FILE", - "SYS___MAC_GET_LCID", - "SYS___MAC_GET_LCTX", - "SYS___MAC_GET_LINK", - "SYS___MAC_GET_MOUNT", - "SYS___MAC_GET_PID", - "SYS___MAC_GET_PROC", - "SYS___MAC_MOUNT", - "SYS___MAC_SET_FD", - "SYS___MAC_SET_FILE", - "SYS___MAC_SET_LCTX", - "SYS___MAC_SET_LINK", - "SYS___MAC_SET_PROC", - "SYS___MAC_SYSCALL", - "SYS___OLD_SEMWAIT_SIGNAL", - "SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", - "SYS___POSIX_CHOWN", - "SYS___POSIX_FCHOWN", - "SYS___POSIX_LCHOWN", - "SYS___POSIX_RENAME", - "SYS___PTHREAD_CANCELED", - "SYS___PTHREAD_CHDIR", - "SYS___PTHREAD_FCHDIR", - "SYS___PTHREAD_KILL", - "SYS___PTHREAD_MARKCANCEL", - "SYS___PTHREAD_SIGMASK", - "SYS___QUOTACTL", - "SYS___SEMCTL", - "SYS___SEMWAIT_SIGNAL", - "SYS___SEMWAIT_SIGNAL_NOCANCEL", - "SYS___SETLOGIN", - "SYS___SETUGID", - "SYS___SET_TCB", - 
"SYS___SIGACTION_SIGTRAMP", - "SYS___SIGTIMEDWAIT", - "SYS___SIGWAIT", - "SYS___SIGWAIT_NOCANCEL", - "SYS___SYSCTL", - "SYS___TFORK", - "SYS___THREXIT", - "SYS___THRSIGDIVERT", - "SYS___THRSLEEP", - "SYS___THRWAKEUP", - "S_ARCH1", - "S_ARCH2", - "S_BLKSIZE", - "S_IEXEC", - "S_IFBLK", - "S_IFCHR", - "S_IFDIR", - "S_IFIFO", - "S_IFLNK", - "S_IFMT", - "S_IFREG", - "S_IFSOCK", - "S_IFWHT", - "S_IREAD", - "S_IRGRP", - "S_IROTH", - "S_IRUSR", - "S_IRWXG", - "S_IRWXO", - "S_IRWXU", - "S_ISGID", - "S_ISTXT", - "S_ISUID", - "S_ISVTX", - "S_IWGRP", - "S_IWOTH", - "S_IWRITE", - "S_IWUSR", - "S_IXGRP", - "S_IXOTH", - "S_IXUSR", - "S_LOGIN_SET", - "SecurityAttributes", - "Seek", - "Select", - "Sendfile", - "Sendmsg", - "SendmsgN", - "Sendto", - "Servent", - "SetBpf", - "SetBpfBuflen", - "SetBpfDatalink", - "SetBpfHeadercmpl", - "SetBpfImmediate", - "SetBpfInterface", - "SetBpfPromisc", - "SetBpfTimeout", - "SetCurrentDirectory", - "SetEndOfFile", - "SetEnvironmentVariable", - "SetFileAttributes", - "SetFileCompletionNotificationModes", - "SetFilePointer", - "SetFileTime", - "SetHandleInformation", - "SetKevent", - "SetLsfPromisc", - "SetNonblock", - "Setdomainname", - "Setegid", - "Setenv", - "Seteuid", - "Setfsgid", - "Setfsuid", - "Setgid", - "Setgroups", - "Sethostname", - "Setlogin", - "Setpgid", - "Setpriority", - "Setprivexec", - "Setregid", - "Setresgid", - "Setresuid", - "Setreuid", - "Setrlimit", - "Setsid", - "Setsockopt", - "SetsockoptByte", - "SetsockoptICMPv6Filter", - "SetsockoptIPMreq", - "SetsockoptIPMreqn", - "SetsockoptIPv6Mreq", - "SetsockoptInet4Addr", - "SetsockoptInt", - "SetsockoptLinger", - "SetsockoptString", - "SetsockoptTimeval", - "Settimeofday", - "Setuid", - "Setxattr", - "Shutdown", - "SidTypeAlias", - "SidTypeComputer", - "SidTypeDeletedAccount", - "SidTypeDomain", - "SidTypeGroup", - "SidTypeInvalid", - "SidTypeLabel", - "SidTypeUnknown", - "SidTypeUser", - "SidTypeWellKnownGroup", - "Signal", - "SizeofBpfHdr", - "SizeofBpfInsn", - "SizeofBpfProgram", - "SizeofBpfStat", - "SizeofBpfVersion", - "SizeofBpfZbuf", - "SizeofBpfZbufHeader", - "SizeofCmsghdr", - "SizeofICMPv6Filter", - "SizeofIPMreq", - "SizeofIPMreqn", - "SizeofIPv6MTUInfo", - "SizeofIPv6Mreq", - "SizeofIfAddrmsg", - "SizeofIfAnnounceMsghdr", - "SizeofIfData", - "SizeofIfInfomsg", - "SizeofIfMsghdr", - "SizeofIfaMsghdr", - "SizeofIfmaMsghdr", - "SizeofIfmaMsghdr2", - "SizeofInet4Pktinfo", - "SizeofInet6Pktinfo", - "SizeofInotifyEvent", - "SizeofLinger", - "SizeofMsghdr", - "SizeofNlAttr", - "SizeofNlMsgerr", - "SizeofNlMsghdr", - "SizeofRtAttr", - "SizeofRtGenmsg", - "SizeofRtMetrics", - "SizeofRtMsg", - "SizeofRtMsghdr", - "SizeofRtNexthop", - "SizeofSockFilter", - "SizeofSockFprog", - "SizeofSockaddrAny", - "SizeofSockaddrDatalink", - "SizeofSockaddrInet4", - "SizeofSockaddrInet6", - "SizeofSockaddrLinklayer", - "SizeofSockaddrNetlink", - "SizeofSockaddrUnix", - "SizeofTCPInfo", - "SizeofUcred", - "SlicePtrFromStrings", - "SockFilter", - "SockFprog", - "Sockaddr", - "SockaddrDatalink", - "SockaddrGen", - "SockaddrInet4", - "SockaddrInet6", - "SockaddrLinklayer", - "SockaddrNetlink", - "SockaddrUnix", - "Socket", - "SocketControlMessage", - "SocketDisableIPv6", - "Socketpair", - "Splice", - "StartProcess", - "StartupInfo", - "Stat", - "Stat_t", - "Statfs", - "Statfs_t", - "Stderr", - "Stdin", - "Stdout", - "StringBytePtr", - "StringByteSlice", - "StringSlicePtr", - "StringToSid", - "StringToUTF16", - "StringToUTF16Ptr", - "Symlink", - "Sync", - "SyncFileRange", - "SysProcAttr", - "SysProcIDMap", - "Syscall", 
- "Syscall12", - "Syscall15", - "Syscall18", - "Syscall6", - "Syscall9", - "SyscallN", - "Sysctl", - "SysctlUint32", - "Sysctlnode", - "Sysinfo", - "Sysinfo_t", - "Systemtime", - "TCGETS", - "TCIFLUSH", - "TCIOFLUSH", - "TCOFLUSH", - "TCPInfo", - "TCPKeepalive", - "TCP_CA_NAME_MAX", - "TCP_CONGCTL", - "TCP_CONGESTION", - "TCP_CONNECTIONTIMEOUT", - "TCP_CORK", - "TCP_DEFER_ACCEPT", - "TCP_ENABLE_ECN", - "TCP_INFO", - "TCP_KEEPALIVE", - "TCP_KEEPCNT", - "TCP_KEEPIDLE", - "TCP_KEEPINIT", - "TCP_KEEPINTVL", - "TCP_LINGER2", - "TCP_MAXBURST", - "TCP_MAXHLEN", - "TCP_MAXOLEN", - "TCP_MAXSEG", - "TCP_MAXWIN", - "TCP_MAX_SACK", - "TCP_MAX_WINSHIFT", - "TCP_MD5SIG", - "TCP_MD5SIG_MAXKEYLEN", - "TCP_MINMSS", - "TCP_MINMSSOVERLOAD", - "TCP_MSS", - "TCP_NODELAY", - "TCP_NOOPT", - "TCP_NOPUSH", - "TCP_NOTSENT_LOWAT", - "TCP_NSTATES", - "TCP_QUICKACK", - "TCP_RXT_CONNDROPTIME", - "TCP_RXT_FINDROP", - "TCP_SACK_ENABLE", - "TCP_SENDMOREACKS", - "TCP_SYNCNT", - "TCP_VENDOR", - "TCP_WINDOW_CLAMP", - "TCSAFLUSH", - "TCSETS", - "TF_DISCONNECT", - "TF_REUSE_SOCKET", - "TF_USE_DEFAULT_WORKER", - "TF_USE_KERNEL_APC", - "TF_USE_SYSTEM_THREAD", - "TF_WRITE_BEHIND", - "TH32CS_INHERIT", - "TH32CS_SNAPALL", - "TH32CS_SNAPHEAPLIST", - "TH32CS_SNAPMODULE", - "TH32CS_SNAPMODULE32", - "TH32CS_SNAPPROCESS", - "TH32CS_SNAPTHREAD", - "TIME_ZONE_ID_DAYLIGHT", - "TIME_ZONE_ID_STANDARD", - "TIME_ZONE_ID_UNKNOWN", - "TIOCCBRK", - "TIOCCDTR", - "TIOCCONS", - "TIOCDCDTIMESTAMP", - "TIOCDRAIN", - "TIOCDSIMICROCODE", - "TIOCEXCL", - "TIOCEXT", - "TIOCFLAG_CDTRCTS", - "TIOCFLAG_CLOCAL", - "TIOCFLAG_CRTSCTS", - "TIOCFLAG_MDMBUF", - "TIOCFLAG_PPS", - "TIOCFLAG_SOFTCAR", - "TIOCFLUSH", - "TIOCGDEV", - "TIOCGDRAINWAIT", - "TIOCGETA", - "TIOCGETD", - "TIOCGFLAGS", - "TIOCGICOUNT", - "TIOCGLCKTRMIOS", - "TIOCGLINED", - "TIOCGPGRP", - "TIOCGPTN", - "TIOCGQSIZE", - "TIOCGRANTPT", - "TIOCGRS485", - "TIOCGSERIAL", - "TIOCGSID", - "TIOCGSIZE", - "TIOCGSOFTCAR", - "TIOCGTSTAMP", - "TIOCGWINSZ", - "TIOCINQ", - "TIOCIXOFF", - "TIOCIXON", - "TIOCLINUX", - "TIOCMBIC", - "TIOCMBIS", - "TIOCMGDTRWAIT", - "TIOCMGET", - "TIOCMIWAIT", - "TIOCMODG", - "TIOCMODS", - "TIOCMSDTRWAIT", - "TIOCMSET", - "TIOCM_CAR", - "TIOCM_CD", - "TIOCM_CTS", - "TIOCM_DCD", - "TIOCM_DSR", - "TIOCM_DTR", - "TIOCM_LE", - "TIOCM_RI", - "TIOCM_RNG", - "TIOCM_RTS", - "TIOCM_SR", - "TIOCM_ST", - "TIOCNOTTY", - "TIOCNXCL", - "TIOCOUTQ", - "TIOCPKT", - "TIOCPKT_DATA", - "TIOCPKT_DOSTOP", - "TIOCPKT_FLUSHREAD", - "TIOCPKT_FLUSHWRITE", - "TIOCPKT_IOCTL", - "TIOCPKT_NOSTOP", - "TIOCPKT_START", - "TIOCPKT_STOP", - "TIOCPTMASTER", - "TIOCPTMGET", - "TIOCPTSNAME", - "TIOCPTYGNAME", - "TIOCPTYGRANT", - "TIOCPTYUNLK", - "TIOCRCVFRAME", - "TIOCREMOTE", - "TIOCSBRK", - "TIOCSCONS", - "TIOCSCTTY", - "TIOCSDRAINWAIT", - "TIOCSDTR", - "TIOCSERCONFIG", - "TIOCSERGETLSR", - "TIOCSERGETMULTI", - "TIOCSERGSTRUCT", - "TIOCSERGWILD", - "TIOCSERSETMULTI", - "TIOCSERSWILD", - "TIOCSER_TEMT", - "TIOCSETA", - "TIOCSETAF", - "TIOCSETAW", - "TIOCSETD", - "TIOCSFLAGS", - "TIOCSIG", - "TIOCSLCKTRMIOS", - "TIOCSLINED", - "TIOCSPGRP", - "TIOCSPTLCK", - "TIOCSQSIZE", - "TIOCSRS485", - "TIOCSSERIAL", - "TIOCSSIZE", - "TIOCSSOFTCAR", - "TIOCSTART", - "TIOCSTAT", - "TIOCSTI", - "TIOCSTOP", - "TIOCSTSTAMP", - "TIOCSWINSZ", - "TIOCTIMESTAMP", - "TIOCUCNTL", - "TIOCVHANGUP", - "TIOCXMTFRAME", - "TOKEN_ADJUST_DEFAULT", - "TOKEN_ADJUST_GROUPS", - "TOKEN_ADJUST_PRIVILEGES", - "TOKEN_ADJUST_SESSIONID", - "TOKEN_ALL_ACCESS", - "TOKEN_ASSIGN_PRIMARY", - "TOKEN_DUPLICATE", - "TOKEN_EXECUTE", - "TOKEN_IMPERSONATE", - 
"TOKEN_QUERY", - "TOKEN_QUERY_SOURCE", - "TOKEN_READ", - "TOKEN_WRITE", - "TOSTOP", - "TRUNCATE_EXISTING", - "TUNATTACHFILTER", - "TUNDETACHFILTER", - "TUNGETFEATURES", - "TUNGETIFF", - "TUNGETSNDBUF", - "TUNGETVNETHDRSZ", - "TUNSETDEBUG", - "TUNSETGROUP", - "TUNSETIFF", - "TUNSETLINK", - "TUNSETNOCSUM", - "TUNSETOFFLOAD", - "TUNSETOWNER", - "TUNSETPERSIST", - "TUNSETSNDBUF", - "TUNSETTXFILTER", - "TUNSETVNETHDRSZ", - "Tee", - "TerminateProcess", - "Termios", - "Tgkill", - "Time", - "Time_t", - "Times", - "Timespec", - "TimespecToNsec", - "Timeval", - "Timeval32", - "TimevalToNsec", - "Timex", - "Timezoneinformation", - "Tms", - "Token", - "TokenAccessInformation", - "TokenAuditPolicy", - "TokenDefaultDacl", - "TokenElevation", - "TokenElevationType", - "TokenGroups", - "TokenGroupsAndPrivileges", - "TokenHasRestrictions", - "TokenImpersonationLevel", - "TokenIntegrityLevel", - "TokenLinkedToken", - "TokenLogonSid", - "TokenMandatoryPolicy", - "TokenOrigin", - "TokenOwner", - "TokenPrimaryGroup", - "TokenPrivileges", - "TokenRestrictedSids", - "TokenSandBoxInert", - "TokenSessionId", - "TokenSessionReference", - "TokenSource", - "TokenStatistics", - "TokenType", - "TokenUIAccess", - "TokenUser", - "TokenVirtualizationAllowed", - "TokenVirtualizationEnabled", - "Tokenprimarygroup", - "Tokenuser", - "TranslateAccountName", - "TranslateName", - "TransmitFile", - "TransmitFileBuffers", - "Truncate", - "UNIX_PATH_MAX", - "USAGE_MATCH_TYPE_AND", - "USAGE_MATCH_TYPE_OR", - "UTF16FromString", - "UTF16PtrFromString", - "UTF16ToString", - "Ucred", - "Umask", - "Uname", - "Undelete", - "UnixCredentials", - "UnixRights", - "Unlink", - "Unlinkat", - "UnmapViewOfFile", - "Unmount", - "Unsetenv", - "Unshare", - "UserInfo10", - "Ustat", - "Ustat_t", - "Utimbuf", - "Utime", - "Utimes", - "UtimesNano", - "Utsname", - "VDISCARD", - "VDSUSP", - "VEOF", - "VEOL", - "VEOL2", - "VERASE", - "VERASE2", - "VINTR", - "VKILL", - "VLNEXT", - "VMIN", - "VQUIT", - "VREPRINT", - "VSTART", - "VSTATUS", - "VSTOP", - "VSUSP", - "VSWTC", - "VT0", - "VT1", - "VTDLY", - "VTIME", - "VWERASE", - "VirtualLock", - "VirtualUnlock", - "WAIT_ABANDONED", - "WAIT_FAILED", - "WAIT_OBJECT_0", - "WAIT_TIMEOUT", - "WALL", - "WALLSIG", - "WALTSIG", - "WCLONE", - "WCONTINUED", - "WCOREFLAG", - "WEXITED", - "WLINUXCLONE", - "WNOHANG", - "WNOTHREAD", - "WNOWAIT", - "WNOZOMBIE", - "WOPTSCHECKED", - "WORDSIZE", - "WSABuf", - "WSACleanup", - "WSADESCRIPTION_LEN", - "WSAData", - "WSAEACCES", - "WSAECONNABORTED", - "WSAECONNRESET", - "WSAEnumProtocols", - "WSAID_CONNECTEX", - "WSAIoctl", - "WSAPROTOCOL_LEN", - "WSAProtocolChain", - "WSAProtocolInfo", - "WSARecv", - "WSARecvFrom", - "WSASYS_STATUS_LEN", - "WSASend", - "WSASendTo", - "WSASendto", - "WSAStartup", - "WSTOPPED", - "WTRAPPED", - "WUNTRACED", - "Wait4", - "WaitForSingleObject", - "WaitStatus", - "Win32FileAttributeData", - "Win32finddata", - "Write", - "WriteConsole", - "WriteFile", - "X509_ASN_ENCODING", - "XCASE", - "XP1_CONNECTIONLESS", - "XP1_CONNECT_DATA", - "XP1_DISCONNECT_DATA", - "XP1_EXPEDITED_DATA", - "XP1_GRACEFUL_CLOSE", - "XP1_GUARANTEED_DELIVERY", - "XP1_GUARANTEED_ORDER", - "XP1_IFS_HANDLES", - "XP1_MESSAGE_ORIENTED", - "XP1_MULTIPOINT_CONTROL_PLANE", - "XP1_MULTIPOINT_DATA_PLANE", - "XP1_PARTIAL_MESSAGE", - "XP1_PSEUDO_STREAM", - "XP1_QOS_SUPPORTED", - "XP1_SAN_SUPPORT_SDP", - "XP1_SUPPORT_BROADCAST", - "XP1_SUPPORT_MULTIPOINT", - "XP1_UNI_RECV", - "XP1_UNI_SEND", - }, - "syscall/js": { - "CopyBytesToGo", - "CopyBytesToJS", - "Error", - "Func", - "FuncOf", - "Global", - 
"Null", - "Type", - "TypeBoolean", - "TypeFunction", - "TypeNull", - "TypeNumber", - "TypeObject", - "TypeString", - "TypeSymbol", - "TypeUndefined", - "Undefined", - "Value", - "ValueError", - "ValueOf", - }, - "testing": { - "AllocsPerRun", - "B", - "Benchmark", - "BenchmarkResult", - "Cover", - "CoverBlock", - "CoverMode", - "Coverage", - "F", - "Init", - "InternalBenchmark", - "InternalExample", - "InternalFuzzTarget", - "InternalTest", - "M", - "Main", - "MainStart", - "PB", - "RegisterCover", - "RunBenchmarks", - "RunExamples", - "RunTests", - "Short", - "T", - "TB", - "Testing", - "Verbose", - }, - "testing/fstest": { - "MapFS", - "MapFile", - "TestFS", - }, - "testing/iotest": { - "DataErrReader", - "ErrReader", - "ErrTimeout", - "HalfReader", - "NewReadLogger", - "NewWriteLogger", - "OneByteReader", - "TestReader", - "TimeoutReader", - "TruncateWriter", - }, - "testing/quick": { - "Check", - "CheckEqual", - "CheckEqualError", - "CheckError", - "Config", - "Generator", - "SetupError", - "Value", - }, - "testing/slogtest": { - "Run", - "TestHandler", - }, - "text/scanner": { - "Char", - "Comment", - "EOF", - "Float", - "GoTokens", - "GoWhitespace", - "Ident", - "Int", - "Position", - "RawString", - "ScanChars", - "ScanComments", - "ScanFloats", - "ScanIdents", - "ScanInts", - "ScanRawStrings", - "ScanStrings", - "Scanner", - "SkipComments", - "String", - "TokenString", - }, - "text/tabwriter": { - "AlignRight", - "Debug", - "DiscardEmptyColumns", - "Escape", - "FilterHTML", - "NewWriter", - "StripEscape", - "TabIndent", - "Writer", - }, - "text/template": { - "ExecError", - "FuncMap", - "HTMLEscape", - "HTMLEscapeString", - "HTMLEscaper", - "IsTrue", - "JSEscape", - "JSEscapeString", - "JSEscaper", - "Must", - "New", - "ParseFS", - "ParseFiles", - "ParseGlob", - "Template", - "URLQueryEscaper", - }, - "text/template/parse": { - "ActionNode", - "BoolNode", - "BranchNode", - "BreakNode", - "ChainNode", - "CommandNode", - "CommentNode", - "ContinueNode", - "DotNode", - "FieldNode", - "IdentifierNode", - "IfNode", - "IsEmptyTree", - "ListNode", - "Mode", - "New", - "NewIdentifier", - "NilNode", - "Node", - "NodeAction", - "NodeBool", - "NodeBreak", - "NodeChain", - "NodeCommand", - "NodeComment", - "NodeContinue", - "NodeDot", - "NodeField", - "NodeIdentifier", - "NodeIf", - "NodeList", - "NodeNil", - "NodeNumber", - "NodePipe", - "NodeRange", - "NodeString", - "NodeTemplate", - "NodeText", - "NodeType", - "NodeVariable", - "NodeWith", - "NumberNode", - "Parse", - "ParseComments", - "PipeNode", - "Pos", - "RangeNode", - "SkipFuncCheck", - "StringNode", - "TemplateNode", - "TextNode", - "Tree", - "VariableNode", - "WithNode", - }, - "time": { - "ANSIC", - "After", - "AfterFunc", - "April", - "August", - "Date", - "DateOnly", - "DateTime", - "December", - "Duration", - "February", - "FixedZone", - "Friday", - "Hour", - "January", - "July", - "June", - "Kitchen", - "Layout", - "LoadLocation", - "LoadLocationFromTZData", - "Local", - "Location", - "March", - "May", - "Microsecond", - "Millisecond", - "Minute", - "Monday", - "Month", - "Nanosecond", - "NewTicker", - "NewTimer", - "November", - "Now", - "October", - "Parse", - "ParseDuration", - "ParseError", - "ParseInLocation", - "RFC1123", - "RFC1123Z", - "RFC3339", - "RFC3339Nano", - "RFC822", - "RFC822Z", - "RFC850", - "RubyDate", - "Saturday", - "Second", - "September", - "Since", - "Sleep", - "Stamp", - "StampMicro", - "StampMilli", - "StampNano", - "Sunday", - "Thursday", - "Tick", - "Ticker", - "Time", - "TimeOnly", - "Timer", - 
"Tuesday", - "UTC", - "Unix", - "UnixDate", - "UnixMicro", - "UnixMilli", - "Until", - "Wednesday", - "Weekday", - }, - "unicode": { - "ASCII_Hex_Digit", - "Adlam", - "Ahom", - "Anatolian_Hieroglyphs", - "Arabic", - "Armenian", - "Avestan", - "AzeriCase", - "Balinese", - "Bamum", - "Bassa_Vah", - "Batak", - "Bengali", - "Bhaiksuki", - "Bidi_Control", - "Bopomofo", - "Brahmi", - "Braille", - "Buginese", - "Buhid", - "C", - "Canadian_Aboriginal", - "Carian", - "CaseRange", - "CaseRanges", - "Categories", - "Caucasian_Albanian", - "Cc", - "Cf", - "Chakma", - "Cham", - "Cherokee", - "Chorasmian", - "Co", - "Common", - "Coptic", - "Cs", - "Cuneiform", - "Cypriot", - "Cypro_Minoan", - "Cyrillic", - "Dash", - "Deprecated", - "Deseret", - "Devanagari", - "Diacritic", - "Digit", - "Dives_Akuru", - "Dogra", - "Duployan", - "Egyptian_Hieroglyphs", - "Elbasan", - "Elymaic", - "Ethiopic", - "Extender", - "FoldCategory", - "FoldScript", - "Georgian", - "Glagolitic", - "Gothic", - "Grantha", - "GraphicRanges", - "Greek", - "Gujarati", - "Gunjala_Gondi", - "Gurmukhi", - "Han", - "Hangul", - "Hanifi_Rohingya", - "Hanunoo", - "Hatran", - "Hebrew", - "Hex_Digit", - "Hiragana", - "Hyphen", - "IDS_Binary_Operator", - "IDS_Trinary_Operator", - "Ideographic", - "Imperial_Aramaic", - "In", - "Inherited", - "Inscriptional_Pahlavi", - "Inscriptional_Parthian", - "Is", - "IsControl", - "IsDigit", - "IsGraphic", - "IsLetter", - "IsLower", - "IsMark", - "IsNumber", - "IsOneOf", - "IsPrint", - "IsPunct", - "IsSpace", - "IsSymbol", - "IsTitle", - "IsUpper", - "Javanese", - "Join_Control", - "Kaithi", - "Kannada", - "Katakana", - "Kawi", - "Kayah_Li", - "Kharoshthi", - "Khitan_Small_Script", - "Khmer", - "Khojki", - "Khudawadi", - "L", - "Lao", - "Latin", - "Lepcha", - "Letter", - "Limbu", - "Linear_A", - "Linear_B", - "Lisu", - "Ll", - "Lm", - "Lo", - "Logical_Order_Exception", - "Lower", - "LowerCase", - "Lt", - "Lu", - "Lycian", - "Lydian", - "M", - "Mahajani", - "Makasar", - "Malayalam", - "Mandaic", - "Manichaean", - "Marchen", - "Mark", - "Masaram_Gondi", - "MaxASCII", - "MaxCase", - "MaxLatin1", - "MaxRune", - "Mc", - "Me", - "Medefaidrin", - "Meetei_Mayek", - "Mende_Kikakui", - "Meroitic_Cursive", - "Meroitic_Hieroglyphs", - "Miao", - "Mn", - "Modi", - "Mongolian", - "Mro", - "Multani", - "Myanmar", - "N", - "Nabataean", - "Nag_Mundari", - "Nandinagari", - "Nd", - "New_Tai_Lue", - "Newa", - "Nko", - "Nl", - "No", - "Noncharacter_Code_Point", - "Number", - "Nushu", - "Nyiakeng_Puachue_Hmong", - "Ogham", - "Ol_Chiki", - "Old_Hungarian", - "Old_Italic", - "Old_North_Arabian", - "Old_Permic", - "Old_Persian", - "Old_Sogdian", - "Old_South_Arabian", - "Old_Turkic", - "Old_Uyghur", - "Oriya", - "Osage", - "Osmanya", - "Other", - "Other_Alphabetic", - "Other_Default_Ignorable_Code_Point", - "Other_Grapheme_Extend", - "Other_ID_Continue", - "Other_ID_Start", - "Other_Lowercase", - "Other_Math", - "Other_Uppercase", - "P", - "Pahawh_Hmong", - "Palmyrene", - "Pattern_Syntax", - "Pattern_White_Space", - "Pau_Cin_Hau", - "Pc", - "Pd", - "Pe", - "Pf", - "Phags_Pa", - "Phoenician", - "Pi", - "Po", - "Prepended_Concatenation_Mark", - "PrintRanges", - "Properties", - "Ps", - "Psalter_Pahlavi", - "Punct", - "Quotation_Mark", - "Radical", - "Range16", - "Range32", - "RangeTable", - "Regional_Indicator", - "Rejang", - "ReplacementChar", - "Runic", - "S", - "STerm", - "Samaritan", - "Saurashtra", - "Sc", - "Scripts", - "Sentence_Terminal", - "Sharada", - "Shavian", - "Siddham", - "SignWriting", - "SimpleFold", - "Sinhala", - "Sk", - 
"Sm", - "So", - "Soft_Dotted", - "Sogdian", - "Sora_Sompeng", - "Soyombo", - "Space", - "SpecialCase", - "Sundanese", - "Syloti_Nagri", - "Symbol", - "Syriac", - "Tagalog", - "Tagbanwa", - "Tai_Le", - "Tai_Tham", - "Tai_Viet", - "Takri", - "Tamil", - "Tangsa", - "Tangut", - "Telugu", - "Terminal_Punctuation", - "Thaana", - "Thai", - "Tibetan", - "Tifinagh", - "Tirhuta", - "Title", - "TitleCase", - "To", - "ToLower", - "ToTitle", - "ToUpper", - "Toto", - "TurkishCase", - "Ugaritic", - "Unified_Ideograph", - "Upper", - "UpperCase", - "UpperLower", - "Vai", - "Variation_Selector", - "Version", - "Vithkuqi", - "Wancho", - "Warang_Citi", - "White_Space", - "Yezidi", - "Yi", - "Z", - "Zanabazar_Square", - "Zl", - "Zp", - "Zs", - }, - "unicode/utf16": { - "AppendRune", - "Decode", - "DecodeRune", - "Encode", - "EncodeRune", - "IsSurrogate", - }, - "unicode/utf8": { - "AppendRune", - "DecodeLastRune", - "DecodeLastRuneInString", - "DecodeRune", - "DecodeRuneInString", - "EncodeRune", - "FullRune", - "FullRuneInString", - "MaxRune", - "RuneCount", - "RuneCountInString", - "RuneError", - "RuneLen", - "RuneSelf", - "RuneStart", - "UTFMax", - "Valid", - "ValidRune", - "ValidString", - }, - "unsafe": { - "Add", - "Alignof", - "Offsetof", - "Pointer", - "Sizeof", - "Slice", - "SliceData", - "String", - "StringData", - }, -} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go new file mode 100644 index 000000000000..fd6892075ee4 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -0,0 +1,17320 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate.go. DO NOT EDIT. 
+ +package stdlib + +var PackageSymbols = map[string][]Symbol{ + "archive/tar": { + {"(*Header).FileInfo", Method, 1}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteHeader", Method, 0}, + {"(Format).String", Method, 10}, + {"ErrFieldTooLong", Var, 0}, + {"ErrHeader", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"ErrWriteAfterClose", Var, 0}, + {"ErrWriteTooLong", Var, 0}, + {"FileInfoHeader", Func, 1}, + {"Format", Type, 10}, + {"FormatGNU", Const, 10}, + {"FormatPAX", Const, 10}, + {"FormatUSTAR", Const, 10}, + {"FormatUnknown", Const, 10}, + {"Header", Type, 0}, + {"Header.AccessTime", Field, 0}, + {"Header.ChangeTime", Field, 0}, + {"Header.Devmajor", Field, 0}, + {"Header.Devminor", Field, 0}, + {"Header.Format", Field, 10}, + {"Header.Gid", Field, 0}, + {"Header.Gname", Field, 0}, + {"Header.Linkname", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Mode", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.PAXRecords", Field, 10}, + {"Header.Size", Field, 0}, + {"Header.Typeflag", Field, 0}, + {"Header.Uid", Field, 0}, + {"Header.Uname", Field, 0}, + {"Header.Xattrs", Field, 3}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Reader", Type, 0}, + {"TypeBlock", Const, 0}, + {"TypeChar", Const, 0}, + {"TypeCont", Const, 0}, + {"TypeDir", Const, 0}, + {"TypeFifo", Const, 0}, + {"TypeGNULongLink", Const, 1}, + {"TypeGNULongName", Const, 1}, + {"TypeGNUSparse", Const, 3}, + {"TypeLink", Const, 0}, + {"TypeReg", Const, 0}, + {"TypeRegA", Const, 0}, + {"TypeSymlink", Const, 0}, + {"TypeXGlobalHeader", Const, 0}, + {"TypeXHeader", Const, 0}, + {"Writer", Type, 0}, + }, + "archive/zip": { + {"(*File).DataOffset", Method, 2}, + {"(*File).FileInfo", Method, 0}, + {"(*File).ModTime", Method, 0}, + {"(*File).Mode", Method, 0}, + {"(*File).Open", Method, 0}, + {"(*File).OpenRaw", Method, 17}, + {"(*File).SetModTime", Method, 0}, + {"(*File).SetMode", Method, 0}, + {"(*FileHeader).FileInfo", Method, 0}, + {"(*FileHeader).ModTime", Method, 0}, + {"(*FileHeader).Mode", Method, 0}, + {"(*FileHeader).SetModTime", Method, 0}, + {"(*FileHeader).SetMode", Method, 0}, + {"(*ReadCloser).Close", Method, 0}, + {"(*ReadCloser).Open", Method, 16}, + {"(*ReadCloser).RegisterDecompressor", Method, 6}, + {"(*Reader).Open", Method, 16}, + {"(*Reader).RegisterDecompressor", Method, 6}, + {"(*Writer).AddFS", Method, 22}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Copy", Method, 17}, + {"(*Writer).Create", Method, 0}, + {"(*Writer).CreateHeader", Method, 0}, + {"(*Writer).CreateRaw", Method, 17}, + {"(*Writer).Flush", Method, 4}, + {"(*Writer).RegisterCompressor", Method, 6}, + {"(*Writer).SetComment", Method, 10}, + {"(*Writer).SetOffset", Method, 5}, + {"Compressor", Type, 2}, + {"Decompressor", Type, 2}, + {"Deflate", Const, 0}, + {"ErrAlgorithm", Var, 0}, + {"ErrChecksum", Var, 0}, + {"ErrFormat", Var, 0}, + {"ErrInsecurePath", Var, 20}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.CRC32", Field, 0}, + {"FileHeader.Comment", Field, 0}, + {"FileHeader.CompressedSize", Field, 0}, + {"FileHeader.CompressedSize64", Field, 1}, + {"FileHeader.CreatorVersion", Field, 0}, + {"FileHeader.ExternalAttrs", Field, 0}, + {"FileHeader.Extra", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Method", Field, 0}, + {"FileHeader.Modified", Field, 10}, + {"FileHeader.ModifiedDate", 
Field, 0}, + {"FileHeader.ModifiedTime", Field, 0}, + {"FileHeader.Name", Field, 0}, + {"FileHeader.NonUTF8", Field, 10}, + {"FileHeader.ReaderVersion", Field, 0}, + {"FileHeader.UncompressedSize", Field, 0}, + {"FileHeader.UncompressedSize64", Field, 1}, + {"FileInfoHeader", Func, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"OpenReader", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadCloser.Reader", Field, 0}, + {"Reader", Type, 0}, + {"Reader.Comment", Field, 0}, + {"Reader.File", Field, 0}, + {"RegisterCompressor", Func, 2}, + {"RegisterDecompressor", Func, 2}, + {"Store", Const, 0}, + {"Writer", Type, 0}, + }, + "bufio": { + {"(*Reader).Buffered", Method, 0}, + {"(*Reader).Discard", Method, 5}, + {"(*Reader).Peek", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadBytes", Method, 0}, + {"(*Reader).ReadLine", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).ReadSlice", Method, 0}, + {"(*Reader).ReadString", Method, 0}, + {"(*Reader).Reset", Method, 2}, + {"(*Reader).Size", Method, 10}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"(*Scanner).Buffer", Method, 6}, + {"(*Scanner).Bytes", Method, 1}, + {"(*Scanner).Err", Method, 1}, + {"(*Scanner).Scan", Method, 1}, + {"(*Scanner).Split", Method, 1}, + {"(*Scanner).Text", Method, 1}, + {"(*Writer).Available", Method, 0}, + {"(*Writer).AvailableBuffer", Method, 18}, + {"(*Writer).Buffered", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).ReadFrom", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Size", Method, 10}, + {"(*Writer).Write", Method, 0}, + {"(*Writer).WriteByte", Method, 0}, + {"(*Writer).WriteRune", Method, 0}, + {"(*Writer).WriteString", Method, 0}, + {"(ReadWriter).Available", Method, 0}, + {"(ReadWriter).AvailableBuffer", Method, 18}, + {"(ReadWriter).Discard", Method, 5}, + {"(ReadWriter).Flush", Method, 0}, + {"(ReadWriter).Peek", Method, 0}, + {"(ReadWriter).Read", Method, 0}, + {"(ReadWriter).ReadByte", Method, 0}, + {"(ReadWriter).ReadBytes", Method, 0}, + {"(ReadWriter).ReadFrom", Method, 1}, + {"(ReadWriter).ReadLine", Method, 0}, + {"(ReadWriter).ReadRune", Method, 0}, + {"(ReadWriter).ReadSlice", Method, 0}, + {"(ReadWriter).ReadString", Method, 0}, + {"(ReadWriter).UnreadByte", Method, 0}, + {"(ReadWriter).UnreadRune", Method, 0}, + {"(ReadWriter).Write", Method, 0}, + {"(ReadWriter).WriteByte", Method, 0}, + {"(ReadWriter).WriteRune", Method, 0}, + {"(ReadWriter).WriteString", Method, 0}, + {"(ReadWriter).WriteTo", Method, 1}, + {"ErrAdvanceTooFar", Var, 1}, + {"ErrBadReadCount", Var, 15}, + {"ErrBufferFull", Var, 0}, + {"ErrFinalToken", Var, 6}, + {"ErrInvalidUnreadByte", Var, 0}, + {"ErrInvalidUnreadRune", Var, 0}, + {"ErrNegativeAdvance", Var, 1}, + {"ErrNegativeCount", Var, 0}, + {"ErrTooLong", Var, 1}, + {"MaxScanTokenSize", Const, 1}, + {"NewReadWriter", Func, 0}, + {"NewReader", Func, 0}, + {"NewReaderSize", Func, 0}, + {"NewScanner", Func, 1}, + {"NewWriter", Func, 0}, + {"NewWriterSize", Func, 0}, + {"ReadWriter", Type, 0}, + {"ReadWriter.Reader", Field, 0}, + {"ReadWriter.Writer", Field, 0}, + {"Reader", Type, 0}, + {"ScanBytes", Func, 1}, + {"ScanLines", Func, 1}, + {"ScanRunes", Func, 1}, + {"ScanWords", Func, 1}, + {"Scanner", Type, 1}, + {"SplitFunc", Type, 1}, + {"Writer", Type, 0}, + }, + "bytes": { + {"(*Buffer).Available", Method, 21}, + {"(*Buffer).AvailableBuffer", Method, 21}, + {"(*Buffer).Bytes", Method, 0}, + 
{"(*Buffer).Cap", Method, 5}, + {"(*Buffer).Grow", Method, 1}, + {"(*Buffer).Len", Method, 0}, + {"(*Buffer).Next", Method, 0}, + {"(*Buffer).Read", Method, 0}, + {"(*Buffer).ReadByte", Method, 0}, + {"(*Buffer).ReadBytes", Method, 0}, + {"(*Buffer).ReadFrom", Method, 0}, + {"(*Buffer).ReadRune", Method, 0}, + {"(*Buffer).ReadString", Method, 0}, + {"(*Buffer).Reset", Method, 0}, + {"(*Buffer).String", Method, 0}, + {"(*Buffer).Truncate", Method, 0}, + {"(*Buffer).UnreadByte", Method, 0}, + {"(*Buffer).UnreadRune", Method, 0}, + {"(*Buffer).Write", Method, 0}, + {"(*Buffer).WriteByte", Method, 0}, + {"(*Buffer).WriteRune", Method, 0}, + {"(*Buffer).WriteString", Method, 0}, + {"(*Buffer).WriteTo", Method, 0}, + {"(*Reader).Len", Method, 0}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).ReadAt", Method, 0}, + {"(*Reader).ReadByte", Method, 0}, + {"(*Reader).ReadRune", Method, 0}, + {"(*Reader).Reset", Method, 7}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).Size", Method, 5}, + {"(*Reader).UnreadByte", Method, 0}, + {"(*Reader).UnreadRune", Method, 0}, + {"(*Reader).WriteTo", Method, 1}, + {"Buffer", Type, 0}, + {"Clone", Func, 20}, + {"Compare", Func, 0}, + {"Contains", Func, 0}, + {"ContainsAny", Func, 7}, + {"ContainsFunc", Func, 21}, + {"ContainsRune", Func, 7}, + {"Count", Func, 0}, + {"Cut", Func, 18}, + {"CutPrefix", Func, 20}, + {"CutSuffix", Func, 20}, + {"Equal", Func, 0}, + {"EqualFold", Func, 0}, + {"ErrTooLarge", Var, 0}, + {"Fields", Func, 0}, + {"FieldsFunc", Func, 0}, + {"HasPrefix", Func, 0}, + {"HasSuffix", Func, 0}, + {"Index", Func, 0}, + {"IndexAny", Func, 0}, + {"IndexByte", Func, 0}, + {"IndexFunc", Func, 0}, + {"IndexRune", Func, 0}, + {"Join", Func, 0}, + {"LastIndex", Func, 0}, + {"LastIndexAny", Func, 0}, + {"LastIndexByte", Func, 5}, + {"LastIndexFunc", Func, 0}, + {"Map", Func, 0}, + {"MinRead", Const, 0}, + {"NewBuffer", Func, 0}, + {"NewBufferString", Func, 0}, + {"NewReader", Func, 0}, + {"Reader", Type, 0}, + {"Repeat", Func, 0}, + {"Replace", Func, 0}, + {"ReplaceAll", Func, 12}, + {"Runes", Func, 0}, + {"Split", Func, 0}, + {"SplitAfter", Func, 0}, + {"SplitAfterN", Func, 0}, + {"SplitN", Func, 0}, + {"Title", Func, 0}, + {"ToLower", Func, 0}, + {"ToLowerSpecial", Func, 0}, + {"ToTitle", Func, 0}, + {"ToTitleSpecial", Func, 0}, + {"ToUpper", Func, 0}, + {"ToUpperSpecial", Func, 0}, + {"ToValidUTF8", Func, 13}, + {"Trim", Func, 0}, + {"TrimFunc", Func, 0}, + {"TrimLeft", Func, 0}, + {"TrimLeftFunc", Func, 0}, + {"TrimPrefix", Func, 1}, + {"TrimRight", Func, 0}, + {"TrimRightFunc", Func, 0}, + {"TrimSpace", Func, 0}, + {"TrimSuffix", Func, 1}, + }, + "cmp": { + {"Compare", Func, 21}, + {"Less", Func, 21}, + {"Or", Func, 22}, + {"Ordered", Type, 21}, + }, + "compress/bzip2": { + {"(StructuralError).Error", Method, 0}, + {"NewReader", Func, 0}, + {"StructuralError", Type, 0}, + }, + "compress/flate": { + {"(*ReadError).Error", Method, 0}, + {"(*WriteError).Error", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"(CorruptInputError).Error", Method, 0}, + {"(InternalError).Error", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"CorruptInputError", Type, 0}, + {"DefaultCompression", Const, 0}, + {"HuffmanOnly", Const, 7}, + {"InternalError", Type, 0}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterDict", Func, 0}, + {"NoCompression", Const, 0}, + {"ReadError", Type, 0}, + 
{"ReadError.Err", Field, 0}, + {"ReadError.Offset", Field, 0}, + {"Reader", Type, 0}, + {"Resetter", Type, 4}, + {"WriteError", Type, 0}, + {"WriteError.Err", Field, 0}, + {"WriteError.Offset", Field, 0}, + {"Writer", Type, 0}, + }, + "compress/gzip": { + {"(*Reader).Close", Method, 0}, + {"(*Reader).Multistream", Method, 4}, + {"(*Reader).Read", Method, 0}, + {"(*Reader).Reset", Method, 3}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 1}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrHeader", Var, 0}, + {"Header", Type, 0}, + {"Header.Comment", Field, 0}, + {"Header.Extra", Field, 0}, + {"Header.ModTime", Field, 0}, + {"Header.Name", Field, 0}, + {"Header.OS", Field, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NoCompression", Const, 0}, + {"Reader", Type, 0}, + {"Reader.Header", Field, 0}, + {"Writer", Type, 0}, + {"Writer.Header", Field, 0}, + }, + "compress/lzw": { + {"(*Reader).Close", Method, 17}, + {"(*Reader).Read", Method, 17}, + {"(*Reader).Reset", Method, 17}, + {"(*Writer).Close", Method, 17}, + {"(*Writer).Reset", Method, 17}, + {"(*Writer).Write", Method, 17}, + {"LSB", Const, 0}, + {"MSB", Const, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Order", Type, 0}, + {"Reader", Type, 17}, + {"Writer", Type, 17}, + }, + "compress/zlib": { + {"(*Writer).Close", Method, 0}, + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Reset", Method, 2}, + {"(*Writer).Write", Method, 0}, + {"BestCompression", Const, 0}, + {"BestSpeed", Const, 0}, + {"DefaultCompression", Const, 0}, + {"ErrChecksum", Var, 0}, + {"ErrDictionary", Var, 0}, + {"ErrHeader", Var, 0}, + {"HuffmanOnly", Const, 8}, + {"NewReader", Func, 0}, + {"NewReaderDict", Func, 0}, + {"NewWriter", Func, 0}, + {"NewWriterLevel", Func, 0}, + {"NewWriterLevelDict", Func, 0}, + {"NoCompression", Const, 0}, + {"Resetter", Type, 4}, + {"Writer", Type, 0}, + }, + "container/heap": { + {"Fix", Func, 2}, + {"Init", Func, 0}, + {"Interface", Type, 0}, + {"Pop", Func, 0}, + {"Push", Func, 0}, + {"Remove", Func, 0}, + }, + "container/list": { + {"(*Element).Next", Method, 0}, + {"(*Element).Prev", Method, 0}, + {"(*List).Back", Method, 0}, + {"(*List).Front", Method, 0}, + {"(*List).Init", Method, 0}, + {"(*List).InsertAfter", Method, 0}, + {"(*List).InsertBefore", Method, 0}, + {"(*List).Len", Method, 0}, + {"(*List).MoveAfter", Method, 2}, + {"(*List).MoveBefore", Method, 2}, + {"(*List).MoveToBack", Method, 0}, + {"(*List).MoveToFront", Method, 0}, + {"(*List).PushBack", Method, 0}, + {"(*List).PushBackList", Method, 0}, + {"(*List).PushFront", Method, 0}, + {"(*List).PushFrontList", Method, 0}, + {"(*List).Remove", Method, 0}, + {"Element", Type, 0}, + {"Element.Value", Field, 0}, + {"List", Type, 0}, + {"New", Func, 0}, + }, + "container/ring": { + {"(*Ring).Do", Method, 0}, + {"(*Ring).Len", Method, 0}, + {"(*Ring).Link", Method, 0}, + {"(*Ring).Move", Method, 0}, + {"(*Ring).Next", Method, 0}, + {"(*Ring).Prev", Method, 0}, + {"(*Ring).Unlink", Method, 0}, + {"New", Func, 0}, + {"Ring", Type, 0}, + {"Ring.Value", Field, 0}, + }, + "context": { + {"AfterFunc", Func, 21}, + {"Background", Func, 7}, + {"CancelCauseFunc", Type, 20}, + {"CancelFunc", Type, 7}, + {"Canceled", Var, 7}, + {"Cause", Func, 20}, + {"Context", Type, 7}, + {"DeadlineExceeded", Var, 7}, + {"TODO", Func, 7}, + 
{"WithCancel", Func, 7}, + {"WithCancelCause", Func, 20}, + {"WithDeadline", Func, 7}, + {"WithDeadlineCause", Func, 21}, + {"WithTimeout", Func, 7}, + {"WithTimeoutCause", Func, 21}, + {"WithValue", Func, 7}, + {"WithoutCancel", Func, 21}, + }, + "crypto": { + {"(Hash).Available", Method, 0}, + {"(Hash).HashFunc", Method, 4}, + {"(Hash).New", Method, 0}, + {"(Hash).Size", Method, 0}, + {"(Hash).String", Method, 15}, + {"BLAKE2b_256", Const, 9}, + {"BLAKE2b_384", Const, 9}, + {"BLAKE2b_512", Const, 9}, + {"BLAKE2s_256", Const, 9}, + {"Decrypter", Type, 5}, + {"DecrypterOpts", Type, 5}, + {"Hash", Type, 0}, + {"MD4", Const, 0}, + {"MD5", Const, 0}, + {"MD5SHA1", Const, 0}, + {"PrivateKey", Type, 0}, + {"PublicKey", Type, 2}, + {"RIPEMD160", Const, 0}, + {"RegisterHash", Func, 0}, + {"SHA1", Const, 0}, + {"SHA224", Const, 0}, + {"SHA256", Const, 0}, + {"SHA384", Const, 0}, + {"SHA3_224", Const, 4}, + {"SHA3_256", Const, 4}, + {"SHA3_384", Const, 4}, + {"SHA3_512", Const, 4}, + {"SHA512", Const, 0}, + {"SHA512_224", Const, 5}, + {"SHA512_256", Const, 5}, + {"Signer", Type, 4}, + {"SignerOpts", Type, 4}, + }, + "crypto/aes": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/cipher": { + {"(StreamReader).Read", Method, 0}, + {"(StreamWriter).Close", Method, 0}, + {"(StreamWriter).Write", Method, 0}, + {"AEAD", Type, 2}, + {"Block", Type, 0}, + {"BlockMode", Type, 0}, + {"NewCBCDecrypter", Func, 0}, + {"NewCBCEncrypter", Func, 0}, + {"NewCFBDecrypter", Func, 0}, + {"NewCFBEncrypter", Func, 0}, + {"NewCTR", Func, 0}, + {"NewGCM", Func, 2}, + {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithTagSize", Func, 11}, + {"NewOFB", Func, 0}, + {"Stream", Type, 0}, + {"StreamReader", Type, 0}, + {"StreamReader.R", Field, 0}, + {"StreamReader.S", Field, 0}, + {"StreamWriter", Type, 0}, + {"StreamWriter.Err", Field, 0}, + {"StreamWriter.S", Field, 0}, + {"StreamWriter.W", Field, 0}, + }, + "crypto/des": { + {"(KeySizeError).Error", Method, 0}, + {"BlockSize", Const, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + {"NewTripleDESCipher", Func, 0}, + }, + "crypto/dsa": { + {"ErrInvalidPublicKey", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateParameters", Func, 0}, + {"L1024N160", Const, 0}, + {"L2048N224", Const, 0}, + {"L2048N256", Const, 0}, + {"L3072N256", Const, 0}, + {"ParameterSizes", Type, 0}, + {"Parameters", Type, 0}, + {"Parameters.G", Field, 0}, + {"Parameters.P", Field, 0}, + {"Parameters.Q", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PrivateKey.X", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Parameters", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"Verify", Func, 0}, + }, + "crypto/ecdh": { + {"(*PrivateKey).Bytes", Method, 20}, + {"(*PrivateKey).Curve", Method, 20}, + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 20}, + {"(*PrivateKey).Public", Method, 20}, + {"(*PrivateKey).PublicKey", Method, 20}, + {"(*PublicKey).Bytes", Method, 20}, + {"(*PublicKey).Curve", Method, 20}, + {"(*PublicKey).Equal", Method, 20}, + {"Curve", Type, 20}, + {"P256", Func, 20}, + {"P384", Func, 20}, + {"P521", Func, 20}, + {"PrivateKey", Type, 20}, + {"PublicKey", Type, 20}, + {"X25519", Func, 20}, + }, + "crypto/ecdsa": { + {"(*PrivateKey).ECDH", Method, 20}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PublicKey).ECDH", Method, 20}, + 
{"(*PublicKey).Equal", Method, 15}, + {"(PrivateKey).Add", Method, 0}, + {"(PrivateKey).Double", Method, 0}, + {"(PrivateKey).IsOnCurve", Method, 0}, + {"(PrivateKey).Params", Method, 0}, + {"(PrivateKey).ScalarBaseMult", Method, 0}, + {"(PrivateKey).ScalarMult", Method, 0}, + {"(PublicKey).Add", Method, 0}, + {"(PublicKey).Double", Method, 0}, + {"(PublicKey).IsOnCurve", Method, 0}, + {"(PublicKey).Params", Method, 0}, + {"(PublicKey).ScalarBaseMult", Method, 0}, + {"(PublicKey).ScalarMult", Method, 0}, + {"GenerateKey", Func, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.Curve", Field, 0}, + {"PublicKey.X", Field, 0}, + {"PublicKey.Y", Field, 0}, + {"Sign", Func, 0}, + {"SignASN1", Func, 15}, + {"Verify", Func, 0}, + {"VerifyASN1", Func, 15}, + }, + "crypto/ed25519": { + {"(*Options).HashFunc", Method, 20}, + {"(PrivateKey).Equal", Method, 15}, + {"(PrivateKey).Public", Method, 13}, + {"(PrivateKey).Seed", Method, 13}, + {"(PrivateKey).Sign", Method, 13}, + {"(PublicKey).Equal", Method, 15}, + {"GenerateKey", Func, 13}, + {"NewKeyFromSeed", Func, 13}, + {"Options", Type, 20}, + {"Options.Context", Field, 20}, + {"Options.Hash", Field, 20}, + {"PrivateKey", Type, 13}, + {"PrivateKeySize", Const, 13}, + {"PublicKey", Type, 13}, + {"PublicKeySize", Const, 13}, + {"SeedSize", Const, 13}, + {"Sign", Func, 13}, + {"SignatureSize", Const, 13}, + {"Verify", Func, 13}, + {"VerifyWithOptions", Func, 20}, + }, + "crypto/elliptic": { + {"(*CurveParams).Add", Method, 0}, + {"(*CurveParams).Double", Method, 0}, + {"(*CurveParams).IsOnCurve", Method, 0}, + {"(*CurveParams).Params", Method, 0}, + {"(*CurveParams).ScalarBaseMult", Method, 0}, + {"(*CurveParams).ScalarMult", Method, 0}, + {"Curve", Type, 0}, + {"CurveParams", Type, 0}, + {"CurveParams.B", Field, 0}, + {"CurveParams.BitSize", Field, 0}, + {"CurveParams.Gx", Field, 0}, + {"CurveParams.Gy", Field, 0}, + {"CurveParams.N", Field, 0}, + {"CurveParams.Name", Field, 5}, + {"CurveParams.P", Field, 0}, + {"GenerateKey", Func, 0}, + {"Marshal", Func, 0}, + {"MarshalCompressed", Func, 15}, + {"P224", Func, 0}, + {"P256", Func, 0}, + {"P384", Func, 0}, + {"P521", Func, 0}, + {"Unmarshal", Func, 0}, + {"UnmarshalCompressed", Func, 15}, + }, + "crypto/hmac": { + {"Equal", Func, 1}, + {"New", Func, 0}, + }, + "crypto/md5": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/rand": { + {"Int", Func, 0}, + {"Prime", Func, 0}, + {"Read", Func, 0}, + {"Reader", Var, 0}, + }, + "crypto/rc4": { + {"(*Cipher).Reset", Method, 0}, + {"(*Cipher).XORKeyStream", Method, 0}, + {"(KeySizeError).Error", Method, 0}, + {"Cipher", Type, 0}, + {"KeySizeError", Type, 0}, + {"NewCipher", Func, 0}, + }, + "crypto/rsa": { + {"(*PSSOptions).HashFunc", Method, 4}, + {"(*PrivateKey).Decrypt", Method, 5}, + {"(*PrivateKey).Equal", Method, 15}, + {"(*PrivateKey).Precompute", Method, 0}, + {"(*PrivateKey).Public", Method, 4}, + {"(*PrivateKey).Sign", Method, 4}, + {"(*PrivateKey).Size", Method, 11}, + {"(*PrivateKey).Validate", Method, 0}, + {"(*PublicKey).Equal", Method, 15}, + {"(*PublicKey).Size", Method, 11}, + {"CRTValue", Type, 0}, + {"CRTValue.Coeff", Field, 0}, + {"CRTValue.Exp", Field, 0}, + {"CRTValue.R", Field, 0}, + {"DecryptOAEP", Func, 0}, + {"DecryptPKCS1v15", Func, 0}, + {"DecryptPKCS1v15SessionKey", Func, 0}, + {"EncryptOAEP", Func, 0}, + {"EncryptPKCS1v15", Func, 0}, + {"ErrDecryption", Var, 0}, + {"ErrMessageTooLong", 
Var, 0}, + {"ErrVerification", Var, 0}, + {"GenerateKey", Func, 0}, + {"GenerateMultiPrimeKey", Func, 0}, + {"OAEPOptions", Type, 5}, + {"OAEPOptions.Hash", Field, 5}, + {"OAEPOptions.Label", Field, 5}, + {"OAEPOptions.MGFHash", Field, 20}, + {"PKCS1v15DecryptOptions", Type, 5}, + {"PKCS1v15DecryptOptions.SessionKeyLen", Field, 5}, + {"PSSOptions", Type, 2}, + {"PSSOptions.Hash", Field, 4}, + {"PSSOptions.SaltLength", Field, 2}, + {"PSSSaltLengthAuto", Const, 2}, + {"PSSSaltLengthEqualsHash", Const, 2}, + {"PrecomputedValues", Type, 0}, + {"PrecomputedValues.CRTValues", Field, 0}, + {"PrecomputedValues.Dp", Field, 0}, + {"PrecomputedValues.Dq", Field, 0}, + {"PrecomputedValues.Qinv", Field, 0}, + {"PrivateKey", Type, 0}, + {"PrivateKey.D", Field, 0}, + {"PrivateKey.Precomputed", Field, 0}, + {"PrivateKey.Primes", Field, 0}, + {"PrivateKey.PublicKey", Field, 0}, + {"PublicKey", Type, 0}, + {"PublicKey.E", Field, 0}, + {"PublicKey.N", Field, 0}, + {"SignPKCS1v15", Func, 0}, + {"SignPSS", Func, 2}, + {"VerifyPKCS1v15", Func, 0}, + {"VerifyPSS", Func, 2}, + }, + "crypto/sha1": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Sum", Func, 2}, + }, + "crypto/sha256": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New224", Func, 0}, + {"Size", Const, 0}, + {"Size224", Const, 0}, + {"Sum224", Func, 2}, + {"Sum256", Func, 2}, + }, + "crypto/sha512": { + {"BlockSize", Const, 0}, + {"New", Func, 0}, + {"New384", Func, 0}, + {"New512_224", Func, 5}, + {"New512_256", Func, 5}, + {"Size", Const, 0}, + {"Size224", Const, 5}, + {"Size256", Const, 5}, + {"Size384", Const, 0}, + {"Sum384", Func, 2}, + {"Sum512", Func, 2}, + {"Sum512_224", Func, 5}, + {"Sum512_256", Func, 5}, + }, + "crypto/subtle": { + {"ConstantTimeByteEq", Func, 0}, + {"ConstantTimeCompare", Func, 0}, + {"ConstantTimeCopy", Func, 0}, + {"ConstantTimeEq", Func, 0}, + {"ConstantTimeLessOrEq", Func, 2}, + {"ConstantTimeSelect", Func, 0}, + {"XORBytes", Func, 20}, + }, + "crypto/tls": { + {"(*CertificateRequestInfo).Context", Method, 17}, + {"(*CertificateRequestInfo).SupportsCertificate", Method, 14}, + {"(*CertificateVerificationError).Error", Method, 20}, + {"(*CertificateVerificationError).Unwrap", Method, 20}, + {"(*ClientHelloInfo).Context", Method, 17}, + {"(*ClientHelloInfo).SupportsCertificate", Method, 14}, + {"(*ClientSessionState).ResumptionState", Method, 21}, + {"(*Config).BuildNameToCertificate", Method, 0}, + {"(*Config).Clone", Method, 8}, + {"(*Config).DecryptTicket", Method, 21}, + {"(*Config).EncryptTicket", Method, 21}, + {"(*Config).SetSessionTicketKeys", Method, 5}, + {"(*Conn).Close", Method, 0}, + {"(*Conn).CloseWrite", Method, 8}, + {"(*Conn).ConnectionState", Method, 0}, + {"(*Conn).Handshake", Method, 0}, + {"(*Conn).HandshakeContext", Method, 17}, + {"(*Conn).LocalAddr", Method, 0}, + {"(*Conn).NetConn", Method, 18}, + {"(*Conn).OCSPResponse", Method, 0}, + {"(*Conn).Read", Method, 0}, + {"(*Conn).RemoteAddr", Method, 0}, + {"(*Conn).SetDeadline", Method, 0}, + {"(*Conn).SetReadDeadline", Method, 0}, + {"(*Conn).SetWriteDeadline", Method, 0}, + {"(*Conn).VerifyHostname", Method, 0}, + {"(*Conn).Write", Method, 0}, + {"(*ConnectionState).ExportKeyingMaterial", Method, 11}, + {"(*Dialer).Dial", Method, 15}, + {"(*Dialer).DialContext", Method, 15}, + {"(*QUICConn).Close", Method, 21}, + {"(*QUICConn).ConnectionState", Method, 21}, + {"(*QUICConn).HandleData", Method, 21}, + {"(*QUICConn).NextEvent", Method, 21}, + {"(*QUICConn).SendSessionTicket", Method, 21}, + 
{"(*QUICConn).SetTransportParameters", Method, 21}, + {"(*QUICConn).Start", Method, 21}, + {"(*SessionState).Bytes", Method, 21}, + {"(AlertError).Error", Method, 21}, + {"(ClientAuthType).String", Method, 15}, + {"(CurveID).String", Method, 15}, + {"(QUICEncryptionLevel).String", Method, 21}, + {"(RecordHeaderError).Error", Method, 6}, + {"(SignatureScheme).String", Method, 15}, + {"AlertError", Type, 21}, + {"Certificate", Type, 0}, + {"Certificate.Certificate", Field, 0}, + {"Certificate.Leaf", Field, 0}, + {"Certificate.OCSPStaple", Field, 0}, + {"Certificate.PrivateKey", Field, 0}, + {"Certificate.SignedCertificateTimestamps", Field, 5}, + {"Certificate.SupportedSignatureAlgorithms", Field, 14}, + {"CertificateRequestInfo", Type, 8}, + {"CertificateRequestInfo.AcceptableCAs", Field, 8}, + {"CertificateRequestInfo.SignatureSchemes", Field, 8}, + {"CertificateRequestInfo.Version", Field, 14}, + {"CertificateVerificationError", Type, 20}, + {"CertificateVerificationError.Err", Field, 20}, + {"CertificateVerificationError.UnverifiedCertificates", Field, 20}, + {"CipherSuite", Type, 14}, + {"CipherSuite.ID", Field, 14}, + {"CipherSuite.Insecure", Field, 14}, + {"CipherSuite.Name", Field, 14}, + {"CipherSuite.SupportedVersions", Field, 14}, + {"CipherSuiteName", Func, 14}, + {"CipherSuites", Func, 14}, + {"Client", Func, 0}, + {"ClientAuthType", Type, 0}, + {"ClientHelloInfo", Type, 4}, + {"ClientHelloInfo.CipherSuites", Field, 4}, + {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.ServerName", Field, 4}, + {"ClientHelloInfo.SignatureSchemes", Field, 8}, + {"ClientHelloInfo.SupportedCurves", Field, 4}, + {"ClientHelloInfo.SupportedPoints", Field, 4}, + {"ClientHelloInfo.SupportedProtos", Field, 8}, + {"ClientHelloInfo.SupportedVersions", Field, 8}, + {"ClientSessionCache", Type, 3}, + {"ClientSessionState", Type, 3}, + {"Config", Type, 0}, + {"Config.Certificates", Field, 0}, + {"Config.CipherSuites", Field, 0}, + {"Config.ClientAuth", Field, 0}, + {"Config.ClientCAs", Field, 0}, + {"Config.ClientSessionCache", Field, 3}, + {"Config.CurvePreferences", Field, 3}, + {"Config.DynamicRecordSizingDisabled", Field, 7}, + {"Config.GetCertificate", Field, 4}, + {"Config.GetClientCertificate", Field, 8}, + {"Config.GetConfigForClient", Field, 8}, + {"Config.InsecureSkipVerify", Field, 0}, + {"Config.KeyLogWriter", Field, 8}, + {"Config.MaxVersion", Field, 2}, + {"Config.MinVersion", Field, 2}, + {"Config.NameToCertificate", Field, 0}, + {"Config.NextProtos", Field, 0}, + {"Config.PreferServerCipherSuites", Field, 1}, + {"Config.Rand", Field, 0}, + {"Config.Renegotiation", Field, 7}, + {"Config.RootCAs", Field, 0}, + {"Config.ServerName", Field, 0}, + {"Config.SessionTicketKey", Field, 1}, + {"Config.SessionTicketsDisabled", Field, 1}, + {"Config.Time", Field, 0}, + {"Config.UnwrapSession", Field, 21}, + {"Config.VerifyConnection", Field, 15}, + {"Config.VerifyPeerCertificate", Field, 8}, + {"Config.WrapSession", Field, 21}, + {"Conn", Type, 0}, + {"ConnectionState", Type, 0}, + {"ConnectionState.CipherSuite", Field, 0}, + {"ConnectionState.DidResume", Field, 1}, + {"ConnectionState.HandshakeComplete", Field, 0}, + {"ConnectionState.NegotiatedProtocol", Field, 0}, + {"ConnectionState.NegotiatedProtocolIsMutual", Field, 0}, + {"ConnectionState.OCSPResponse", Field, 5}, + {"ConnectionState.PeerCertificates", Field, 0}, + {"ConnectionState.ServerName", Field, 0}, + {"ConnectionState.SignedCertificateTimestamps", Field, 5}, + {"ConnectionState.TLSUnique", Field, 4}, + 
{"ConnectionState.VerifiedChains", Field, 0}, + {"ConnectionState.Version", Field, 3}, + {"CurveID", Type, 3}, + {"CurveP256", Const, 3}, + {"CurveP384", Const, 3}, + {"CurveP521", Const, 3}, + {"Dial", Func, 0}, + {"DialWithDialer", Func, 3}, + {"Dialer", Type, 15}, + {"Dialer.Config", Field, 15}, + {"Dialer.NetDialer", Field, 15}, + {"ECDSAWithP256AndSHA256", Const, 8}, + {"ECDSAWithP384AndSHA384", Const, 8}, + {"ECDSAWithP521AndSHA512", Const, 8}, + {"ECDSAWithSHA1", Const, 10}, + {"Ed25519", Const, 13}, + {"InsecureCipherSuites", Func, 14}, + {"Listen", Func, 0}, + {"LoadX509KeyPair", Func, 0}, + {"NewLRUClientSessionCache", Func, 3}, + {"NewListener", Func, 0}, + {"NewResumptionState", Func, 21}, + {"NoClientCert", Const, 0}, + {"PKCS1WithSHA1", Const, 8}, + {"PKCS1WithSHA256", Const, 8}, + {"PKCS1WithSHA384", Const, 8}, + {"PKCS1WithSHA512", Const, 8}, + {"PSSWithSHA256", Const, 8}, + {"PSSWithSHA384", Const, 8}, + {"PSSWithSHA512", Const, 8}, + {"ParseSessionState", Func, 21}, + {"QUICClient", Func, 21}, + {"QUICConfig", Type, 21}, + {"QUICConfig.TLSConfig", Field, 21}, + {"QUICConn", Type, 21}, + {"QUICEncryptionLevel", Type, 21}, + {"QUICEncryptionLevelApplication", Const, 21}, + {"QUICEncryptionLevelEarly", Const, 21}, + {"QUICEncryptionLevelHandshake", Const, 21}, + {"QUICEncryptionLevelInitial", Const, 21}, + {"QUICEvent", Type, 21}, + {"QUICEvent.Data", Field, 21}, + {"QUICEvent.Kind", Field, 21}, + {"QUICEvent.Level", Field, 21}, + {"QUICEvent.Suite", Field, 21}, + {"QUICEventKind", Type, 21}, + {"QUICHandshakeDone", Const, 21}, + {"QUICNoEvent", Const, 21}, + {"QUICRejectedEarlyData", Const, 21}, + {"QUICServer", Func, 21}, + {"QUICSessionTicketOptions", Type, 21}, + {"QUICSessionTicketOptions.EarlyData", Field, 21}, + {"QUICSetReadSecret", Const, 21}, + {"QUICSetWriteSecret", Const, 21}, + {"QUICTransportParameters", Const, 21}, + {"QUICTransportParametersRequired", Const, 21}, + {"QUICWriteData", Const, 21}, + {"RecordHeaderError", Type, 6}, + {"RecordHeaderError.Conn", Field, 12}, + {"RecordHeaderError.Msg", Field, 6}, + {"RecordHeaderError.RecordHeader", Field, 6}, + {"RenegotiateFreelyAsClient", Const, 7}, + {"RenegotiateNever", Const, 7}, + {"RenegotiateOnceAsClient", Const, 7}, + {"RenegotiationSupport", Type, 7}, + {"RequestClientCert", Const, 0}, + {"RequireAndVerifyClientCert", Const, 0}, + {"RequireAnyClientCert", Const, 0}, + {"Server", Func, 0}, + {"SessionState", Type, 21}, + {"SessionState.EarlyData", Field, 21}, + {"SessionState.Extra", Field, 21}, + {"SignatureScheme", Type, 8}, + {"TLS_AES_128_GCM_SHA256", Const, 12}, + {"TLS_AES_256_GCM_SHA384", Const, 12}, + {"TLS_CHACHA20_POLY1305_SHA256", Const, 12}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", Const, 2}, + {"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", Const, 8}, + {"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", Const, 2}, + {"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", Const, 2}, + {"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", Const, 5}, + {"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", Const, 8}, + 
{"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", Const, 14}, + {"TLS_ECDHE_RSA_WITH_RC4_128_SHA", Const, 0}, + {"TLS_FALLBACK_SCSV", Const, 4}, + {"TLS_RSA_WITH_3DES_EDE_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA", Const, 0}, + {"TLS_RSA_WITH_AES_128_CBC_SHA256", Const, 8}, + {"TLS_RSA_WITH_AES_128_GCM_SHA256", Const, 6}, + {"TLS_RSA_WITH_AES_256_CBC_SHA", Const, 1}, + {"TLS_RSA_WITH_AES_256_GCM_SHA384", Const, 6}, + {"TLS_RSA_WITH_RC4_128_SHA", Const, 0}, + {"VerifyClientCertIfGiven", Const, 0}, + {"VersionName", Func, 21}, + {"VersionSSL30", Const, 2}, + {"VersionTLS10", Const, 2}, + {"VersionTLS11", Const, 2}, + {"VersionTLS12", Const, 2}, + {"VersionTLS13", Const, 12}, + {"X25519", Const, 8}, + {"X509KeyPair", Func, 0}, + }, + "crypto/x509": { + {"(*CertPool).AddCert", Method, 0}, + {"(*CertPool).AddCertWithConstraint", Method, 22}, + {"(*CertPool).AppendCertsFromPEM", Method, 0}, + {"(*CertPool).Clone", Method, 19}, + {"(*CertPool).Equal", Method, 19}, + {"(*CertPool).Subjects", Method, 0}, + {"(*Certificate).CheckCRLSignature", Method, 0}, + {"(*Certificate).CheckSignature", Method, 0}, + {"(*Certificate).CheckSignatureFrom", Method, 0}, + {"(*Certificate).CreateCRL", Method, 0}, + {"(*Certificate).Equal", Method, 0}, + {"(*Certificate).Verify", Method, 0}, + {"(*Certificate).VerifyHostname", Method, 0}, + {"(*CertificateRequest).CheckSignature", Method, 5}, + {"(*RevocationList).CheckSignatureFrom", Method, 19}, + {"(CertificateInvalidError).Error", Method, 0}, + {"(ConstraintViolationError).Error", Method, 0}, + {"(HostnameError).Error", Method, 0}, + {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).Equal", Method, 22}, + {"(OID).EqualASN1OID", Method, 22}, + {"(OID).String", Method, 22}, + {"(PublicKeyAlgorithm).String", Method, 10}, + {"(SignatureAlgorithm).String", Method, 6}, + {"(SystemRootsError).Error", Method, 1}, + {"(SystemRootsError).Unwrap", Method, 16}, + {"(UnhandledCriticalExtension).Error", Method, 0}, + {"(UnknownAuthorityError).Error", Method, 0}, + {"CANotAuthorizedForExtKeyUsage", Const, 10}, + {"CANotAuthorizedForThisName", Const, 0}, + {"CertPool", Type, 0}, + {"Certificate", Type, 0}, + {"Certificate.AuthorityKeyId", Field, 0}, + {"Certificate.BasicConstraintsValid", Field, 0}, + {"Certificate.CRLDistributionPoints", Field, 2}, + {"Certificate.DNSNames", Field, 0}, + {"Certificate.EmailAddresses", Field, 0}, + {"Certificate.ExcludedDNSDomains", Field, 9}, + {"Certificate.ExcludedEmailAddresses", Field, 10}, + {"Certificate.ExcludedIPRanges", Field, 10}, + {"Certificate.ExcludedURIDomains", Field, 10}, + {"Certificate.ExtKeyUsage", Field, 0}, + {"Certificate.Extensions", Field, 2}, + {"Certificate.ExtraExtensions", Field, 2}, + {"Certificate.IPAddresses", Field, 1}, + {"Certificate.IsCA", Field, 0}, + {"Certificate.Issuer", Field, 0}, + {"Certificate.IssuingCertificateURL", Field, 2}, + {"Certificate.KeyUsage", Field, 0}, + {"Certificate.MaxPathLen", Field, 0}, + {"Certificate.MaxPathLenZero", Field, 4}, + {"Certificate.NotAfter", Field, 0}, + {"Certificate.NotBefore", Field, 0}, + {"Certificate.OCSPServer", Field, 2}, + {"Certificate.PermittedDNSDomains", Field, 0}, + {"Certificate.PermittedDNSDomainsCritical", Field, 0}, + {"Certificate.PermittedEmailAddresses", Field, 10}, + {"Certificate.PermittedIPRanges", Field, 10}, + {"Certificate.PermittedURIDomains", Field, 10}, + {"Certificate.Policies", Field, 22}, + {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PublicKey", Field, 0}, + {"Certificate.PublicKeyAlgorithm", Field, 0}, 
+ {"Certificate.Raw", Field, 0}, + {"Certificate.RawIssuer", Field, 0}, + {"Certificate.RawSubject", Field, 0}, + {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, + {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.SerialNumber", Field, 0}, + {"Certificate.Signature", Field, 0}, + {"Certificate.SignatureAlgorithm", Field, 0}, + {"Certificate.Subject", Field, 0}, + {"Certificate.SubjectKeyId", Field, 0}, + {"Certificate.URIs", Field, 10}, + {"Certificate.UnhandledCriticalExtensions", Field, 5}, + {"Certificate.UnknownExtKeyUsage", Field, 0}, + {"Certificate.Version", Field, 0}, + {"CertificateInvalidError", Type, 0}, + {"CertificateInvalidError.Cert", Field, 0}, + {"CertificateInvalidError.Detail", Field, 10}, + {"CertificateInvalidError.Reason", Field, 0}, + {"CertificateRequest", Type, 3}, + {"CertificateRequest.Attributes", Field, 3}, + {"CertificateRequest.DNSNames", Field, 3}, + {"CertificateRequest.EmailAddresses", Field, 3}, + {"CertificateRequest.Extensions", Field, 3}, + {"CertificateRequest.ExtraExtensions", Field, 3}, + {"CertificateRequest.IPAddresses", Field, 3}, + {"CertificateRequest.PublicKey", Field, 3}, + {"CertificateRequest.PublicKeyAlgorithm", Field, 3}, + {"CertificateRequest.Raw", Field, 3}, + {"CertificateRequest.RawSubject", Field, 3}, + {"CertificateRequest.RawSubjectPublicKeyInfo", Field, 3}, + {"CertificateRequest.RawTBSCertificateRequest", Field, 3}, + {"CertificateRequest.Signature", Field, 3}, + {"CertificateRequest.SignatureAlgorithm", Field, 3}, + {"CertificateRequest.Subject", Field, 3}, + {"CertificateRequest.URIs", Field, 10}, + {"CertificateRequest.Version", Field, 3}, + {"ConstraintViolationError", Type, 0}, + {"CreateCertificate", Func, 0}, + {"CreateCertificateRequest", Func, 3}, + {"CreateRevocationList", Func, 15}, + {"DSA", Const, 0}, + {"DSAWithSHA1", Const, 0}, + {"DSAWithSHA256", Const, 0}, + {"DecryptPEMBlock", Func, 1}, + {"ECDSA", Const, 1}, + {"ECDSAWithSHA1", Const, 1}, + {"ECDSAWithSHA256", Const, 1}, + {"ECDSAWithSHA384", Const, 1}, + {"ECDSAWithSHA512", Const, 1}, + {"Ed25519", Const, 13}, + {"EncryptPEMBlock", Func, 1}, + {"ErrUnsupportedAlgorithm", Var, 0}, + {"Expired", Const, 0}, + {"ExtKeyUsage", Type, 0}, + {"ExtKeyUsageAny", Const, 0}, + {"ExtKeyUsageClientAuth", Const, 0}, + {"ExtKeyUsageCodeSigning", Const, 0}, + {"ExtKeyUsageEmailProtection", Const, 0}, + {"ExtKeyUsageIPSECEndSystem", Const, 1}, + {"ExtKeyUsageIPSECTunnel", Const, 1}, + {"ExtKeyUsageIPSECUser", Const, 1}, + {"ExtKeyUsageMicrosoftCommercialCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftKernelCodeSigning", Const, 10}, + {"ExtKeyUsageMicrosoftServerGatedCrypto", Const, 1}, + {"ExtKeyUsageNetscapeServerGatedCrypto", Const, 1}, + {"ExtKeyUsageOCSPSigning", Const, 0}, + {"ExtKeyUsageServerAuth", Const, 0}, + {"ExtKeyUsageTimeStamping", Const, 0}, + {"HostnameError", Type, 0}, + {"HostnameError.Certificate", Field, 0}, + {"HostnameError.Host", Field, 0}, + {"IncompatibleUsage", Const, 1}, + {"IncorrectPasswordError", Var, 1}, + {"InsecureAlgorithmError", Type, 6}, + {"InvalidReason", Type, 0}, + {"IsEncryptedPEMBlock", Func, 1}, + {"KeyUsage", Type, 0}, + {"KeyUsageCRLSign", Const, 0}, + {"KeyUsageCertSign", Const, 0}, + {"KeyUsageContentCommitment", Const, 0}, + {"KeyUsageDataEncipherment", Const, 0}, + {"KeyUsageDecipherOnly", Const, 0}, + {"KeyUsageDigitalSignature", Const, 0}, + {"KeyUsageEncipherOnly", Const, 0}, + {"KeyUsageKeyAgreement", Const, 0}, + {"KeyUsageKeyEncipherment", Const, 0}, + {"MD2WithRSA", Const, 0}, + {"MD5WithRSA", Const, 0}, + 
{"MarshalECPrivateKey", Func, 2}, + {"MarshalPKCS1PrivateKey", Func, 0}, + {"MarshalPKCS1PublicKey", Func, 10}, + {"MarshalPKCS8PrivateKey", Func, 10}, + {"MarshalPKIXPublicKey", Func, 0}, + {"NameConstraintsWithoutSANs", Const, 10}, + {"NameMismatch", Const, 8}, + {"NewCertPool", Func, 0}, + {"NotAuthorizedToSign", Const, 0}, + {"OID", Type, 22}, + {"OIDFromInts", Func, 22}, + {"PEMCipher", Type, 1}, + {"PEMCipher3DES", Const, 1}, + {"PEMCipherAES128", Const, 1}, + {"PEMCipherAES192", Const, 1}, + {"PEMCipherAES256", Const, 1}, + {"PEMCipherDES", Const, 1}, + {"ParseCRL", Func, 0}, + {"ParseCertificate", Func, 0}, + {"ParseCertificateRequest", Func, 3}, + {"ParseCertificates", Func, 0}, + {"ParseDERCRL", Func, 0}, + {"ParseECPrivateKey", Func, 1}, + {"ParsePKCS1PrivateKey", Func, 0}, + {"ParsePKCS1PublicKey", Func, 10}, + {"ParsePKCS8PrivateKey", Func, 0}, + {"ParsePKIXPublicKey", Func, 0}, + {"ParseRevocationList", Func, 19}, + {"PublicKeyAlgorithm", Type, 0}, + {"PureEd25519", Const, 13}, + {"RSA", Const, 0}, + {"RevocationList", Type, 15}, + {"RevocationList.AuthorityKeyId", Field, 19}, + {"RevocationList.Extensions", Field, 19}, + {"RevocationList.ExtraExtensions", Field, 15}, + {"RevocationList.Issuer", Field, 19}, + {"RevocationList.NextUpdate", Field, 15}, + {"RevocationList.Number", Field, 15}, + {"RevocationList.Raw", Field, 19}, + {"RevocationList.RawIssuer", Field, 19}, + {"RevocationList.RawTBSRevocationList", Field, 19}, + {"RevocationList.RevokedCertificateEntries", Field, 21}, + {"RevocationList.RevokedCertificates", Field, 15}, + {"RevocationList.Signature", Field, 19}, + {"RevocationList.SignatureAlgorithm", Field, 15}, + {"RevocationList.ThisUpdate", Field, 15}, + {"RevocationListEntry", Type, 21}, + {"RevocationListEntry.Extensions", Field, 21}, + {"RevocationListEntry.ExtraExtensions", Field, 21}, + {"RevocationListEntry.Raw", Field, 21}, + {"RevocationListEntry.ReasonCode", Field, 21}, + {"RevocationListEntry.RevocationTime", Field, 21}, + {"RevocationListEntry.SerialNumber", Field, 21}, + {"SHA1WithRSA", Const, 0}, + {"SHA256WithRSA", Const, 0}, + {"SHA256WithRSAPSS", Const, 8}, + {"SHA384WithRSA", Const, 0}, + {"SHA384WithRSAPSS", Const, 8}, + {"SHA512WithRSA", Const, 0}, + {"SHA512WithRSAPSS", Const, 8}, + {"SetFallbackRoots", Func, 20}, + {"SignatureAlgorithm", Type, 0}, + {"SystemCertPool", Func, 7}, + {"SystemRootsError", Type, 1}, + {"SystemRootsError.Err", Field, 7}, + {"TooManyConstraints", Const, 10}, + {"TooManyIntermediates", Const, 0}, + {"UnconstrainedName", Const, 10}, + {"UnhandledCriticalExtension", Type, 0}, + {"UnknownAuthorityError", Type, 0}, + {"UnknownAuthorityError.Cert", Field, 8}, + {"UnknownPublicKeyAlgorithm", Const, 0}, + {"UnknownSignatureAlgorithm", Const, 0}, + {"VerifyOptions", Type, 0}, + {"VerifyOptions.CurrentTime", Field, 0}, + {"VerifyOptions.DNSName", Field, 0}, + {"VerifyOptions.Intermediates", Field, 0}, + {"VerifyOptions.KeyUsages", Field, 1}, + {"VerifyOptions.MaxConstraintComparisions", Field, 10}, + {"VerifyOptions.Roots", Field, 0}, + }, + "crypto/x509/pkix": { + {"(*CertificateList).HasExpired", Method, 0}, + {"(*Name).FillFromRDNSequence", Method, 0}, + {"(Name).String", Method, 10}, + {"(Name).ToRDNSequence", Method, 0}, + {"(RDNSequence).String", Method, 10}, + {"AlgorithmIdentifier", Type, 0}, + {"AlgorithmIdentifier.Algorithm", Field, 0}, + {"AlgorithmIdentifier.Parameters", Field, 0}, + {"AttributeTypeAndValue", Type, 0}, + {"AttributeTypeAndValue.Type", Field, 0}, + {"AttributeTypeAndValue.Value", Field, 0}, + 
{"AttributeTypeAndValueSET", Type, 3}, + {"AttributeTypeAndValueSET.Type", Field, 3}, + {"AttributeTypeAndValueSET.Value", Field, 3}, + {"CertificateList", Type, 0}, + {"CertificateList.SignatureAlgorithm", Field, 0}, + {"CertificateList.SignatureValue", Field, 0}, + {"CertificateList.TBSCertList", Field, 0}, + {"Extension", Type, 0}, + {"Extension.Critical", Field, 0}, + {"Extension.Id", Field, 0}, + {"Extension.Value", Field, 0}, + {"Name", Type, 0}, + {"Name.CommonName", Field, 0}, + {"Name.Country", Field, 0}, + {"Name.ExtraNames", Field, 5}, + {"Name.Locality", Field, 0}, + {"Name.Names", Field, 0}, + {"Name.Organization", Field, 0}, + {"Name.OrganizationalUnit", Field, 0}, + {"Name.PostalCode", Field, 0}, + {"Name.Province", Field, 0}, + {"Name.SerialNumber", Field, 0}, + {"Name.StreetAddress", Field, 0}, + {"RDNSequence", Type, 0}, + {"RelativeDistinguishedNameSET", Type, 0}, + {"RevokedCertificate", Type, 0}, + {"RevokedCertificate.Extensions", Field, 0}, + {"RevokedCertificate.RevocationTime", Field, 0}, + {"RevokedCertificate.SerialNumber", Field, 0}, + {"TBSCertificateList", Type, 0}, + {"TBSCertificateList.Extensions", Field, 0}, + {"TBSCertificateList.Issuer", Field, 0}, + {"TBSCertificateList.NextUpdate", Field, 0}, + {"TBSCertificateList.Raw", Field, 0}, + {"TBSCertificateList.RevokedCertificates", Field, 0}, + {"TBSCertificateList.Signature", Field, 0}, + {"TBSCertificateList.ThisUpdate", Field, 0}, + {"TBSCertificateList.Version", Field, 0}, + }, + "database/sql": { + {"(*ColumnType).DatabaseTypeName", Method, 8}, + {"(*ColumnType).DecimalSize", Method, 8}, + {"(*ColumnType).Length", Method, 8}, + {"(*ColumnType).Name", Method, 8}, + {"(*ColumnType).Nullable", Method, 8}, + {"(*ColumnType).ScanType", Method, 8}, + {"(*Conn).BeginTx", Method, 9}, + {"(*Conn).Close", Method, 9}, + {"(*Conn).ExecContext", Method, 9}, + {"(*Conn).PingContext", Method, 9}, + {"(*Conn).PrepareContext", Method, 9}, + {"(*Conn).QueryContext", Method, 9}, + {"(*Conn).QueryRowContext", Method, 9}, + {"(*Conn).Raw", Method, 13}, + {"(*DB).Begin", Method, 0}, + {"(*DB).BeginTx", Method, 8}, + {"(*DB).Close", Method, 0}, + {"(*DB).Conn", Method, 9}, + {"(*DB).Driver", Method, 0}, + {"(*DB).Exec", Method, 0}, + {"(*DB).ExecContext", Method, 8}, + {"(*DB).Ping", Method, 1}, + {"(*DB).PingContext", Method, 8}, + {"(*DB).Prepare", Method, 0}, + {"(*DB).PrepareContext", Method, 8}, + {"(*DB).Query", Method, 0}, + {"(*DB).QueryContext", Method, 8}, + {"(*DB).QueryRow", Method, 0}, + {"(*DB).QueryRowContext", Method, 8}, + {"(*DB).SetConnMaxIdleTime", Method, 15}, + {"(*DB).SetConnMaxLifetime", Method, 6}, + {"(*DB).SetMaxIdleConns", Method, 1}, + {"(*DB).SetMaxOpenConns", Method, 2}, + {"(*DB).Stats", Method, 5}, + {"(*Null).Scan", Method, 22}, + {"(*NullBool).Scan", Method, 0}, + {"(*NullByte).Scan", Method, 17}, + {"(*NullFloat64).Scan", Method, 0}, + {"(*NullInt16).Scan", Method, 17}, + {"(*NullInt32).Scan", Method, 13}, + {"(*NullInt64).Scan", Method, 0}, + {"(*NullString).Scan", Method, 0}, + {"(*NullTime).Scan", Method, 13}, + {"(*Row).Err", Method, 15}, + {"(*Row).Scan", Method, 0}, + {"(*Rows).Close", Method, 0}, + {"(*Rows).ColumnTypes", Method, 8}, + {"(*Rows).Columns", Method, 0}, + {"(*Rows).Err", Method, 0}, + {"(*Rows).Next", Method, 0}, + {"(*Rows).NextResultSet", Method, 8}, + {"(*Rows).Scan", Method, 0}, + {"(*Stmt).Close", Method, 0}, + {"(*Stmt).Exec", Method, 0}, + {"(*Stmt).ExecContext", Method, 8}, + {"(*Stmt).Query", Method, 0}, + {"(*Stmt).QueryContext", Method, 8}, + 
{"(*Stmt).QueryRow", Method, 0}, + {"(*Stmt).QueryRowContext", Method, 8}, + {"(*Tx).Commit", Method, 0}, + {"(*Tx).Exec", Method, 0}, + {"(*Tx).ExecContext", Method, 8}, + {"(*Tx).Prepare", Method, 0}, + {"(*Tx).PrepareContext", Method, 8}, + {"(*Tx).Query", Method, 0}, + {"(*Tx).QueryContext", Method, 8}, + {"(*Tx).QueryRow", Method, 0}, + {"(*Tx).QueryRowContext", Method, 8}, + {"(*Tx).Rollback", Method, 0}, + {"(*Tx).Stmt", Method, 0}, + {"(*Tx).StmtContext", Method, 8}, + {"(IsolationLevel).String", Method, 11}, + {"(Null).Value", Method, 22}, + {"(NullBool).Value", Method, 0}, + {"(NullByte).Value", Method, 17}, + {"(NullFloat64).Value", Method, 0}, + {"(NullInt16).Value", Method, 17}, + {"(NullInt32).Value", Method, 13}, + {"(NullInt64).Value", Method, 0}, + {"(NullString).Value", Method, 0}, + {"(NullTime).Value", Method, 13}, + {"ColumnType", Type, 8}, + {"Conn", Type, 9}, + {"DB", Type, 0}, + {"DBStats", Type, 5}, + {"DBStats.Idle", Field, 11}, + {"DBStats.InUse", Field, 11}, + {"DBStats.MaxIdleClosed", Field, 11}, + {"DBStats.MaxIdleTimeClosed", Field, 15}, + {"DBStats.MaxLifetimeClosed", Field, 11}, + {"DBStats.MaxOpenConnections", Field, 11}, + {"DBStats.OpenConnections", Field, 5}, + {"DBStats.WaitCount", Field, 11}, + {"DBStats.WaitDuration", Field, 11}, + {"Drivers", Func, 4}, + {"ErrConnDone", Var, 9}, + {"ErrNoRows", Var, 0}, + {"ErrTxDone", Var, 0}, + {"IsolationLevel", Type, 8}, + {"LevelDefault", Const, 8}, + {"LevelLinearizable", Const, 8}, + {"LevelReadCommitted", Const, 8}, + {"LevelReadUncommitted", Const, 8}, + {"LevelRepeatableRead", Const, 8}, + {"LevelSerializable", Const, 8}, + {"LevelSnapshot", Const, 8}, + {"LevelWriteCommitted", Const, 8}, + {"Named", Func, 8}, + {"NamedArg", Type, 8}, + {"NamedArg.Name", Field, 8}, + {"NamedArg.Value", Field, 8}, + {"Null", Type, 22}, + {"Null.V", Field, 22}, + {"Null.Valid", Field, 22}, + {"NullBool", Type, 0}, + {"NullBool.Bool", Field, 0}, + {"NullBool.Valid", Field, 0}, + {"NullByte", Type, 17}, + {"NullByte.Byte", Field, 17}, + {"NullByte.Valid", Field, 17}, + {"NullFloat64", Type, 0}, + {"NullFloat64.Float64", Field, 0}, + {"NullFloat64.Valid", Field, 0}, + {"NullInt16", Type, 17}, + {"NullInt16.Int16", Field, 17}, + {"NullInt16.Valid", Field, 17}, + {"NullInt32", Type, 13}, + {"NullInt32.Int32", Field, 13}, + {"NullInt32.Valid", Field, 13}, + {"NullInt64", Type, 0}, + {"NullInt64.Int64", Field, 0}, + {"NullInt64.Valid", Field, 0}, + {"NullString", Type, 0}, + {"NullString.String", Field, 0}, + {"NullString.Valid", Field, 0}, + {"NullTime", Type, 13}, + {"NullTime.Time", Field, 13}, + {"NullTime.Valid", Field, 13}, + {"Open", Func, 0}, + {"OpenDB", Func, 10}, + {"Out", Type, 9}, + {"Out.Dest", Field, 9}, + {"Out.In", Field, 9}, + {"RawBytes", Type, 0}, + {"Register", Func, 0}, + {"Result", Type, 0}, + {"Row", Type, 0}, + {"Rows", Type, 0}, + {"Scanner", Type, 0}, + {"Stmt", Type, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + }, + "database/sql/driver": { + {"(NotNull).ConvertValue", Method, 0}, + {"(Null).ConvertValue", Method, 0}, + {"(RowsAffected).LastInsertId", Method, 0}, + {"(RowsAffected).RowsAffected", Method, 0}, + {"Bool", Var, 0}, + {"ColumnConverter", Type, 0}, + {"Conn", Type, 0}, + {"ConnBeginTx", Type, 8}, + {"ConnPrepareContext", Type, 8}, + {"Connector", Type, 10}, + {"DefaultParameterConverter", Var, 0}, + {"Driver", Type, 0}, + {"DriverContext", Type, 10}, + {"ErrBadConn", Var, 0}, + {"ErrRemoveArgument", Var, 9}, + 
{"ErrSkip", Var, 0}, + {"Execer", Type, 0}, + {"ExecerContext", Type, 8}, + {"Int32", Var, 0}, + {"IsScanValue", Func, 0}, + {"IsValue", Func, 0}, + {"IsolationLevel", Type, 8}, + {"NamedValue", Type, 8}, + {"NamedValue.Name", Field, 8}, + {"NamedValue.Ordinal", Field, 8}, + {"NamedValue.Value", Field, 8}, + {"NamedValueChecker", Type, 9}, + {"NotNull", Type, 0}, + {"NotNull.Converter", Field, 0}, + {"Null", Type, 0}, + {"Null.Converter", Field, 0}, + {"Pinger", Type, 8}, + {"Queryer", Type, 1}, + {"QueryerContext", Type, 8}, + {"Result", Type, 0}, + {"ResultNoRows", Var, 0}, + {"Rows", Type, 0}, + {"RowsAffected", Type, 0}, + {"RowsColumnTypeDatabaseTypeName", Type, 8}, + {"RowsColumnTypeLength", Type, 8}, + {"RowsColumnTypeNullable", Type, 8}, + {"RowsColumnTypePrecisionScale", Type, 8}, + {"RowsColumnTypeScanType", Type, 8}, + {"RowsNextResultSet", Type, 8}, + {"SessionResetter", Type, 10}, + {"Stmt", Type, 0}, + {"StmtExecContext", Type, 8}, + {"StmtQueryContext", Type, 8}, + {"String", Var, 0}, + {"Tx", Type, 0}, + {"TxOptions", Type, 8}, + {"TxOptions.Isolation", Field, 8}, + {"TxOptions.ReadOnly", Field, 8}, + {"Validator", Type, 15}, + {"Value", Type, 0}, + {"ValueConverter", Type, 0}, + {"Valuer", Type, 0}, + }, + "debug/buildinfo": { + {"BuildInfo", Type, 18}, + {"Read", Func, 18}, + {"ReadFile", Func, 18}, + }, + "debug/dwarf": { + {"(*AddrType).Basic", Method, 0}, + {"(*AddrType).Common", Method, 0}, + {"(*AddrType).Size", Method, 0}, + {"(*AddrType).String", Method, 0}, + {"(*ArrayType).Common", Method, 0}, + {"(*ArrayType).Size", Method, 0}, + {"(*ArrayType).String", Method, 0}, + {"(*BasicType).Basic", Method, 0}, + {"(*BasicType).Common", Method, 0}, + {"(*BasicType).Size", Method, 0}, + {"(*BasicType).String", Method, 0}, + {"(*BoolType).Basic", Method, 0}, + {"(*BoolType).Common", Method, 0}, + {"(*BoolType).Size", Method, 0}, + {"(*BoolType).String", Method, 0}, + {"(*CharType).Basic", Method, 0}, + {"(*CharType).Common", Method, 0}, + {"(*CharType).Size", Method, 0}, + {"(*CharType).String", Method, 0}, + {"(*CommonType).Common", Method, 0}, + {"(*CommonType).Size", Method, 0}, + {"(*ComplexType).Basic", Method, 0}, + {"(*ComplexType).Common", Method, 0}, + {"(*ComplexType).Size", Method, 0}, + {"(*ComplexType).String", Method, 0}, + {"(*Data).AddSection", Method, 14}, + {"(*Data).AddTypes", Method, 3}, + {"(*Data).LineReader", Method, 5}, + {"(*Data).Ranges", Method, 7}, + {"(*Data).Reader", Method, 0}, + {"(*Data).Type", Method, 0}, + {"(*DotDotDotType).Common", Method, 0}, + {"(*DotDotDotType).Size", Method, 0}, + {"(*DotDotDotType).String", Method, 0}, + {"(*Entry).AttrField", Method, 5}, + {"(*Entry).Val", Method, 0}, + {"(*EnumType).Common", Method, 0}, + {"(*EnumType).Size", Method, 0}, + {"(*EnumType).String", Method, 0}, + {"(*FloatType).Basic", Method, 0}, + {"(*FloatType).Common", Method, 0}, + {"(*FloatType).Size", Method, 0}, + {"(*FloatType).String", Method, 0}, + {"(*FuncType).Common", Method, 0}, + {"(*FuncType).Size", Method, 0}, + {"(*FuncType).String", Method, 0}, + {"(*IntType).Basic", Method, 0}, + {"(*IntType).Common", Method, 0}, + {"(*IntType).Size", Method, 0}, + {"(*IntType).String", Method, 0}, + {"(*LineReader).Files", Method, 14}, + {"(*LineReader).Next", Method, 5}, + {"(*LineReader).Reset", Method, 5}, + {"(*LineReader).Seek", Method, 5}, + {"(*LineReader).SeekPC", Method, 5}, + {"(*LineReader).Tell", Method, 5}, + {"(*PtrType).Common", Method, 0}, + {"(*PtrType).Size", Method, 0}, + {"(*PtrType).String", Method, 0}, + 
{"(*QualType).Common", Method, 0}, + {"(*QualType).Size", Method, 0}, + {"(*QualType).String", Method, 0}, + {"(*Reader).AddressSize", Method, 5}, + {"(*Reader).ByteOrder", Method, 14}, + {"(*Reader).Next", Method, 0}, + {"(*Reader).Seek", Method, 0}, + {"(*Reader).SeekPC", Method, 7}, + {"(*Reader).SkipChildren", Method, 0}, + {"(*StructType).Common", Method, 0}, + {"(*StructType).Defn", Method, 0}, + {"(*StructType).Size", Method, 0}, + {"(*StructType).String", Method, 0}, + {"(*TypedefType).Common", Method, 0}, + {"(*TypedefType).Size", Method, 0}, + {"(*TypedefType).String", Method, 0}, + {"(*UcharType).Basic", Method, 0}, + {"(*UcharType).Common", Method, 0}, + {"(*UcharType).Size", Method, 0}, + {"(*UcharType).String", Method, 0}, + {"(*UintType).Basic", Method, 0}, + {"(*UintType).Common", Method, 0}, + {"(*UintType).Size", Method, 0}, + {"(*UintType).String", Method, 0}, + {"(*UnspecifiedType).Basic", Method, 4}, + {"(*UnspecifiedType).Common", Method, 4}, + {"(*UnspecifiedType).Size", Method, 4}, + {"(*UnspecifiedType).String", Method, 4}, + {"(*UnsupportedType).Common", Method, 13}, + {"(*UnsupportedType).Size", Method, 13}, + {"(*UnsupportedType).String", Method, 13}, + {"(*VoidType).Common", Method, 0}, + {"(*VoidType).Size", Method, 0}, + {"(*VoidType).String", Method, 0}, + {"(Attr).GoString", Method, 0}, + {"(Attr).String", Method, 0}, + {"(Class).GoString", Method, 5}, + {"(Class).String", Method, 5}, + {"(DecodeError).Error", Method, 0}, + {"(Tag).GoString", Method, 0}, + {"(Tag).String", Method, 0}, + {"AddrType", Type, 0}, + {"AddrType.BasicType", Field, 0}, + {"ArrayType", Type, 0}, + {"ArrayType.CommonType", Field, 0}, + {"ArrayType.Count", Field, 0}, + {"ArrayType.StrideBitSize", Field, 0}, + {"ArrayType.Type", Field, 0}, + {"Attr", Type, 0}, + {"AttrAbstractOrigin", Const, 0}, + {"AttrAccessibility", Const, 0}, + {"AttrAddrBase", Const, 14}, + {"AttrAddrClass", Const, 0}, + {"AttrAlignment", Const, 14}, + {"AttrAllocated", Const, 0}, + {"AttrArtificial", Const, 0}, + {"AttrAssociated", Const, 0}, + {"AttrBaseTypes", Const, 0}, + {"AttrBinaryScale", Const, 14}, + {"AttrBitOffset", Const, 0}, + {"AttrBitSize", Const, 0}, + {"AttrByteSize", Const, 0}, + {"AttrCallAllCalls", Const, 14}, + {"AttrCallAllSourceCalls", Const, 14}, + {"AttrCallAllTailCalls", Const, 14}, + {"AttrCallColumn", Const, 0}, + {"AttrCallDataLocation", Const, 14}, + {"AttrCallDataValue", Const, 14}, + {"AttrCallFile", Const, 0}, + {"AttrCallLine", Const, 0}, + {"AttrCallOrigin", Const, 14}, + {"AttrCallPC", Const, 14}, + {"AttrCallParameter", Const, 14}, + {"AttrCallReturnPC", Const, 14}, + {"AttrCallTailCall", Const, 14}, + {"AttrCallTarget", Const, 14}, + {"AttrCallTargetClobbered", Const, 14}, + {"AttrCallValue", Const, 14}, + {"AttrCalling", Const, 0}, + {"AttrCommonRef", Const, 0}, + {"AttrCompDir", Const, 0}, + {"AttrConstExpr", Const, 14}, + {"AttrConstValue", Const, 0}, + {"AttrContainingType", Const, 0}, + {"AttrCount", Const, 0}, + {"AttrDataBitOffset", Const, 14}, + {"AttrDataLocation", Const, 0}, + {"AttrDataMemberLoc", Const, 0}, + {"AttrDecimalScale", Const, 14}, + {"AttrDecimalSign", Const, 14}, + {"AttrDeclColumn", Const, 0}, + {"AttrDeclFile", Const, 0}, + {"AttrDeclLine", Const, 0}, + {"AttrDeclaration", Const, 0}, + {"AttrDefaultValue", Const, 0}, + {"AttrDefaulted", Const, 14}, + {"AttrDeleted", Const, 14}, + {"AttrDescription", Const, 0}, + {"AttrDigitCount", Const, 14}, + {"AttrDiscr", Const, 0}, + {"AttrDiscrList", Const, 0}, + {"AttrDiscrValue", Const, 0}, + {"AttrDwoName", 
Const, 14}, + {"AttrElemental", Const, 14}, + {"AttrEncoding", Const, 0}, + {"AttrEndianity", Const, 14}, + {"AttrEntrypc", Const, 0}, + {"AttrEnumClass", Const, 14}, + {"AttrExplicit", Const, 14}, + {"AttrExportSymbols", Const, 14}, + {"AttrExtension", Const, 0}, + {"AttrExternal", Const, 0}, + {"AttrFrameBase", Const, 0}, + {"AttrFriend", Const, 0}, + {"AttrHighpc", Const, 0}, + {"AttrIdentifierCase", Const, 0}, + {"AttrImport", Const, 0}, + {"AttrInline", Const, 0}, + {"AttrIsOptional", Const, 0}, + {"AttrLanguage", Const, 0}, + {"AttrLinkageName", Const, 14}, + {"AttrLocation", Const, 0}, + {"AttrLoclistsBase", Const, 14}, + {"AttrLowerBound", Const, 0}, + {"AttrLowpc", Const, 0}, + {"AttrMacroInfo", Const, 0}, + {"AttrMacros", Const, 14}, + {"AttrMainSubprogram", Const, 14}, + {"AttrMutable", Const, 14}, + {"AttrName", Const, 0}, + {"AttrNamelistItem", Const, 0}, + {"AttrNoreturn", Const, 14}, + {"AttrObjectPointer", Const, 14}, + {"AttrOrdering", Const, 0}, + {"AttrPictureString", Const, 14}, + {"AttrPriority", Const, 0}, + {"AttrProducer", Const, 0}, + {"AttrPrototyped", Const, 0}, + {"AttrPure", Const, 14}, + {"AttrRanges", Const, 0}, + {"AttrRank", Const, 14}, + {"AttrRecursive", Const, 14}, + {"AttrReference", Const, 14}, + {"AttrReturnAddr", Const, 0}, + {"AttrRnglistsBase", Const, 14}, + {"AttrRvalueReference", Const, 14}, + {"AttrSegment", Const, 0}, + {"AttrSibling", Const, 0}, + {"AttrSignature", Const, 14}, + {"AttrSmall", Const, 14}, + {"AttrSpecification", Const, 0}, + {"AttrStartScope", Const, 0}, + {"AttrStaticLink", Const, 0}, + {"AttrStmtList", Const, 0}, + {"AttrStrOffsetsBase", Const, 14}, + {"AttrStride", Const, 0}, + {"AttrStrideSize", Const, 0}, + {"AttrStringLength", Const, 0}, + {"AttrStringLengthBitSize", Const, 14}, + {"AttrStringLengthByteSize", Const, 14}, + {"AttrThreadsScaled", Const, 14}, + {"AttrTrampoline", Const, 0}, + {"AttrType", Const, 0}, + {"AttrUpperBound", Const, 0}, + {"AttrUseLocation", Const, 0}, + {"AttrUseUTF8", Const, 0}, + {"AttrVarParam", Const, 0}, + {"AttrVirtuality", Const, 0}, + {"AttrVisibility", Const, 0}, + {"AttrVtableElemLoc", Const, 0}, + {"BasicType", Type, 0}, + {"BasicType.BitOffset", Field, 0}, + {"BasicType.BitSize", Field, 0}, + {"BasicType.CommonType", Field, 0}, + {"BasicType.DataBitOffset", Field, 18}, + {"BoolType", Type, 0}, + {"BoolType.BasicType", Field, 0}, + {"CharType", Type, 0}, + {"CharType.BasicType", Field, 0}, + {"Class", Type, 5}, + {"ClassAddrPtr", Const, 14}, + {"ClassAddress", Const, 5}, + {"ClassBlock", Const, 5}, + {"ClassConstant", Const, 5}, + {"ClassExprLoc", Const, 5}, + {"ClassFlag", Const, 5}, + {"ClassLinePtr", Const, 5}, + {"ClassLocList", Const, 14}, + {"ClassLocListPtr", Const, 5}, + {"ClassMacPtr", Const, 5}, + {"ClassRangeListPtr", Const, 5}, + {"ClassReference", Const, 5}, + {"ClassReferenceAlt", Const, 5}, + {"ClassReferenceSig", Const, 5}, + {"ClassRngList", Const, 14}, + {"ClassRngListsPtr", Const, 14}, + {"ClassStrOffsetsPtr", Const, 14}, + {"ClassString", Const, 5}, + {"ClassStringAlt", Const, 5}, + {"ClassUnknown", Const, 6}, + {"CommonType", Type, 0}, + {"CommonType.ByteSize", Field, 0}, + {"CommonType.Name", Field, 0}, + {"ComplexType", Type, 0}, + {"ComplexType.BasicType", Field, 0}, + {"Data", Type, 0}, + {"DecodeError", Type, 0}, + {"DecodeError.Err", Field, 0}, + {"DecodeError.Name", Field, 0}, + {"DecodeError.Offset", Field, 0}, + {"DotDotDotType", Type, 0}, + {"DotDotDotType.CommonType", Field, 0}, + {"Entry", Type, 0}, + {"Entry.Children", Field, 0}, + {"Entry.Field", 
Field, 0}, + {"Entry.Offset", Field, 0}, + {"Entry.Tag", Field, 0}, + {"EnumType", Type, 0}, + {"EnumType.CommonType", Field, 0}, + {"EnumType.EnumName", Field, 0}, + {"EnumType.Val", Field, 0}, + {"EnumValue", Type, 0}, + {"EnumValue.Name", Field, 0}, + {"EnumValue.Val", Field, 0}, + {"ErrUnknownPC", Var, 5}, + {"Field", Type, 0}, + {"Field.Attr", Field, 0}, + {"Field.Class", Field, 5}, + {"Field.Val", Field, 0}, + {"FloatType", Type, 0}, + {"FloatType.BasicType", Field, 0}, + {"FuncType", Type, 0}, + {"FuncType.CommonType", Field, 0}, + {"FuncType.ParamType", Field, 0}, + {"FuncType.ReturnType", Field, 0}, + {"IntType", Type, 0}, + {"IntType.BasicType", Field, 0}, + {"LineEntry", Type, 5}, + {"LineEntry.Address", Field, 5}, + {"LineEntry.BasicBlock", Field, 5}, + {"LineEntry.Column", Field, 5}, + {"LineEntry.Discriminator", Field, 5}, + {"LineEntry.EndSequence", Field, 5}, + {"LineEntry.EpilogueBegin", Field, 5}, + {"LineEntry.File", Field, 5}, + {"LineEntry.ISA", Field, 5}, + {"LineEntry.IsStmt", Field, 5}, + {"LineEntry.Line", Field, 5}, + {"LineEntry.OpIndex", Field, 5}, + {"LineEntry.PrologueEnd", Field, 5}, + {"LineFile", Type, 5}, + {"LineFile.Length", Field, 5}, + {"LineFile.Mtime", Field, 5}, + {"LineFile.Name", Field, 5}, + {"LineReader", Type, 5}, + {"LineReaderPos", Type, 5}, + {"New", Func, 0}, + {"Offset", Type, 0}, + {"PtrType", Type, 0}, + {"PtrType.CommonType", Field, 0}, + {"PtrType.Type", Field, 0}, + {"QualType", Type, 0}, + {"QualType.CommonType", Field, 0}, + {"QualType.Qual", Field, 0}, + {"QualType.Type", Field, 0}, + {"Reader", Type, 0}, + {"StructField", Type, 0}, + {"StructField.BitOffset", Field, 0}, + {"StructField.BitSize", Field, 0}, + {"StructField.ByteOffset", Field, 0}, + {"StructField.ByteSize", Field, 0}, + {"StructField.DataBitOffset", Field, 18}, + {"StructField.Name", Field, 0}, + {"StructField.Type", Field, 0}, + {"StructType", Type, 0}, + {"StructType.CommonType", Field, 0}, + {"StructType.Field", Field, 0}, + {"StructType.Incomplete", Field, 0}, + {"StructType.Kind", Field, 0}, + {"StructType.StructName", Field, 0}, + {"Tag", Type, 0}, + {"TagAccessDeclaration", Const, 0}, + {"TagArrayType", Const, 0}, + {"TagAtomicType", Const, 14}, + {"TagBaseType", Const, 0}, + {"TagCallSite", Const, 14}, + {"TagCallSiteParameter", Const, 14}, + {"TagCatchDwarfBlock", Const, 0}, + {"TagClassType", Const, 0}, + {"TagCoarrayType", Const, 14}, + {"TagCommonDwarfBlock", Const, 0}, + {"TagCommonInclusion", Const, 0}, + {"TagCompileUnit", Const, 0}, + {"TagCondition", Const, 3}, + {"TagConstType", Const, 0}, + {"TagConstant", Const, 0}, + {"TagDwarfProcedure", Const, 0}, + {"TagDynamicType", Const, 14}, + {"TagEntryPoint", Const, 0}, + {"TagEnumerationType", Const, 0}, + {"TagEnumerator", Const, 0}, + {"TagFileType", Const, 0}, + {"TagFormalParameter", Const, 0}, + {"TagFriend", Const, 0}, + {"TagGenericSubrange", Const, 14}, + {"TagImmutableType", Const, 14}, + {"TagImportedDeclaration", Const, 0}, + {"TagImportedModule", Const, 0}, + {"TagImportedUnit", Const, 0}, + {"TagInheritance", Const, 0}, + {"TagInlinedSubroutine", Const, 0}, + {"TagInterfaceType", Const, 0}, + {"TagLabel", Const, 0}, + {"TagLexDwarfBlock", Const, 0}, + {"TagMember", Const, 0}, + {"TagModule", Const, 0}, + {"TagMutableType", Const, 0}, + {"TagNamelist", Const, 0}, + {"TagNamelistItem", Const, 0}, + {"TagNamespace", Const, 0}, + {"TagPackedType", Const, 0}, + {"TagPartialUnit", Const, 0}, + {"TagPointerType", Const, 0}, + {"TagPtrToMemberType", Const, 0}, + {"TagReferenceType", Const, 0}, + 
{"TagRestrictType", Const, 0}, + {"TagRvalueReferenceType", Const, 3}, + {"TagSetType", Const, 0}, + {"TagSharedType", Const, 3}, + {"TagSkeletonUnit", Const, 14}, + {"TagStringType", Const, 0}, + {"TagStructType", Const, 0}, + {"TagSubprogram", Const, 0}, + {"TagSubrangeType", Const, 0}, + {"TagSubroutineType", Const, 0}, + {"TagTemplateAlias", Const, 3}, + {"TagTemplateTypeParameter", Const, 0}, + {"TagTemplateValueParameter", Const, 0}, + {"TagThrownType", Const, 0}, + {"TagTryDwarfBlock", Const, 0}, + {"TagTypeUnit", Const, 3}, + {"TagTypedef", Const, 0}, + {"TagUnionType", Const, 0}, + {"TagUnspecifiedParameters", Const, 0}, + {"TagUnspecifiedType", Const, 0}, + {"TagVariable", Const, 0}, + {"TagVariant", Const, 0}, + {"TagVariantPart", Const, 0}, + {"TagVolatileType", Const, 0}, + {"TagWithStmt", Const, 0}, + {"Type", Type, 0}, + {"TypedefType", Type, 0}, + {"TypedefType.CommonType", Field, 0}, + {"TypedefType.Type", Field, 0}, + {"UcharType", Type, 0}, + {"UcharType.BasicType", Field, 0}, + {"UintType", Type, 0}, + {"UintType.BasicType", Field, 0}, + {"UnspecifiedType", Type, 4}, + {"UnspecifiedType.BasicType", Field, 4}, + {"UnsupportedType", Type, 13}, + {"UnsupportedType.CommonType", Field, 13}, + {"UnsupportedType.Tag", Field, 13}, + {"VoidType", Type, 0}, + {"VoidType.CommonType", Field, 0}, + }, + "debug/elf": { + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).DynString", Method, 1}, + {"(*File).DynValue", Method, 21}, + {"(*File).DynamicSymbols", Method, 4}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).SectionByType", Method, 0}, + {"(*File).Symbols", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Prog).Open", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(Class).GoString", Method, 0}, + {"(Class).String", Method, 0}, + {"(CompressionType).GoString", Method, 6}, + {"(CompressionType).String", Method, 6}, + {"(Data).GoString", Method, 0}, + {"(Data).String", Method, 0}, + {"(DynFlag).GoString", Method, 0}, + {"(DynFlag).String", Method, 0}, + {"(DynFlag1).GoString", Method, 21}, + {"(DynFlag1).String", Method, 21}, + {"(DynTag).GoString", Method, 0}, + {"(DynTag).String", Method, 0}, + {"(Machine).GoString", Method, 0}, + {"(Machine).String", Method, 0}, + {"(NType).GoString", Method, 0}, + {"(NType).String", Method, 0}, + {"(OSABI).GoString", Method, 0}, + {"(OSABI).String", Method, 0}, + {"(Prog).ReadAt", Method, 0}, + {"(ProgFlag).GoString", Method, 0}, + {"(ProgFlag).String", Method, 0}, + {"(ProgType).GoString", Method, 0}, + {"(ProgType).String", Method, 0}, + {"(R_386).GoString", Method, 0}, + {"(R_386).String", Method, 0}, + {"(R_390).GoString", Method, 7}, + {"(R_390).String", Method, 7}, + {"(R_AARCH64).GoString", Method, 4}, + {"(R_AARCH64).String", Method, 4}, + {"(R_ALPHA).GoString", Method, 0}, + {"(R_ALPHA).String", Method, 0}, + {"(R_ARM).GoString", Method, 0}, + {"(R_ARM).String", Method, 0}, + {"(R_LARCH).GoString", Method, 19}, + {"(R_LARCH).String", Method, 19}, + {"(R_MIPS).GoString", Method, 6}, + {"(R_MIPS).String", Method, 6}, + {"(R_PPC).GoString", Method, 0}, + {"(R_PPC).String", Method, 0}, + {"(R_PPC64).GoString", Method, 5}, + {"(R_PPC64).String", Method, 5}, + {"(R_RISCV).GoString", Method, 11}, + {"(R_RISCV).String", Method, 11}, + {"(R_SPARC).GoString", Method, 0}, + {"(R_SPARC).String", Method, 0}, + {"(R_X86_64).GoString", Method, 0}, + {"(R_X86_64).String", Method, 0}, + 
{"(Section).ReadAt", Method, 0}, + {"(SectionFlag).GoString", Method, 0}, + {"(SectionFlag).String", Method, 0}, + {"(SectionIndex).GoString", Method, 0}, + {"(SectionIndex).String", Method, 0}, + {"(SectionType).GoString", Method, 0}, + {"(SectionType).String", Method, 0}, + {"(SymBind).GoString", Method, 0}, + {"(SymBind).String", Method, 0}, + {"(SymType).GoString", Method, 0}, + {"(SymType).String", Method, 0}, + {"(SymVis).GoString", Method, 0}, + {"(SymVis).String", Method, 0}, + {"(Type).GoString", Method, 0}, + {"(Type).String", Method, 0}, + {"(Version).GoString", Method, 0}, + {"(Version).String", Method, 0}, + {"ARM_MAGIC_TRAMP_NUMBER", Const, 0}, + {"COMPRESS_HIOS", Const, 6}, + {"COMPRESS_HIPROC", Const, 6}, + {"COMPRESS_LOOS", Const, 6}, + {"COMPRESS_LOPROC", Const, 6}, + {"COMPRESS_ZLIB", Const, 6}, + {"COMPRESS_ZSTD", Const, 21}, + {"Chdr32", Type, 6}, + {"Chdr32.Addralign", Field, 6}, + {"Chdr32.Size", Field, 6}, + {"Chdr32.Type", Field, 6}, + {"Chdr64", Type, 6}, + {"Chdr64.Addralign", Field, 6}, + {"Chdr64.Size", Field, 6}, + {"Chdr64.Type", Field, 6}, + {"Class", Type, 0}, + {"CompressionType", Type, 6}, + {"DF_1_CONFALT", Const, 21}, + {"DF_1_DIRECT", Const, 21}, + {"DF_1_DISPRELDNE", Const, 21}, + {"DF_1_DISPRELPND", Const, 21}, + {"DF_1_EDITED", Const, 21}, + {"DF_1_ENDFILTEE", Const, 21}, + {"DF_1_GLOBAL", Const, 21}, + {"DF_1_GLOBAUDIT", Const, 21}, + {"DF_1_GROUP", Const, 21}, + {"DF_1_IGNMULDEF", Const, 21}, + {"DF_1_INITFIRST", Const, 21}, + {"DF_1_INTERPOSE", Const, 21}, + {"DF_1_KMOD", Const, 21}, + {"DF_1_LOADFLTR", Const, 21}, + {"DF_1_NOCOMMON", Const, 21}, + {"DF_1_NODEFLIB", Const, 21}, + {"DF_1_NODELETE", Const, 21}, + {"DF_1_NODIRECT", Const, 21}, + {"DF_1_NODUMP", Const, 21}, + {"DF_1_NOHDR", Const, 21}, + {"DF_1_NOKSYMS", Const, 21}, + {"DF_1_NOOPEN", Const, 21}, + {"DF_1_NORELOC", Const, 21}, + {"DF_1_NOW", Const, 21}, + {"DF_1_ORIGIN", Const, 21}, + {"DF_1_PIE", Const, 21}, + {"DF_1_SINGLETON", Const, 21}, + {"DF_1_STUB", Const, 21}, + {"DF_1_SYMINTPOSE", Const, 21}, + {"DF_1_TRANS", Const, 21}, + {"DF_1_WEAKFILTER", Const, 21}, + {"DF_BIND_NOW", Const, 0}, + {"DF_ORIGIN", Const, 0}, + {"DF_STATIC_TLS", Const, 0}, + {"DF_SYMBOLIC", Const, 0}, + {"DF_TEXTREL", Const, 0}, + {"DT_ADDRRNGHI", Const, 16}, + {"DT_ADDRRNGLO", Const, 16}, + {"DT_AUDIT", Const, 16}, + {"DT_AUXILIARY", Const, 16}, + {"DT_BIND_NOW", Const, 0}, + {"DT_CHECKSUM", Const, 16}, + {"DT_CONFIG", Const, 16}, + {"DT_DEBUG", Const, 0}, + {"DT_DEPAUDIT", Const, 16}, + {"DT_ENCODING", Const, 0}, + {"DT_FEATURE", Const, 16}, + {"DT_FILTER", Const, 16}, + {"DT_FINI", Const, 0}, + {"DT_FINI_ARRAY", Const, 0}, + {"DT_FINI_ARRAYSZ", Const, 0}, + {"DT_FLAGS", Const, 0}, + {"DT_FLAGS_1", Const, 16}, + {"DT_GNU_CONFLICT", Const, 16}, + {"DT_GNU_CONFLICTSZ", Const, 16}, + {"DT_GNU_HASH", Const, 16}, + {"DT_GNU_LIBLIST", Const, 16}, + {"DT_GNU_LIBLISTSZ", Const, 16}, + {"DT_GNU_PRELINKED", Const, 16}, + {"DT_HASH", Const, 0}, + {"DT_HIOS", Const, 0}, + {"DT_HIPROC", Const, 0}, + {"DT_INIT", Const, 0}, + {"DT_INIT_ARRAY", Const, 0}, + {"DT_INIT_ARRAYSZ", Const, 0}, + {"DT_JMPREL", Const, 0}, + {"DT_LOOS", Const, 0}, + {"DT_LOPROC", Const, 0}, + {"DT_MIPS_AUX_DYNAMIC", Const, 16}, + {"DT_MIPS_BASE_ADDRESS", Const, 16}, + {"DT_MIPS_COMPACT_SIZE", Const, 16}, + {"DT_MIPS_CONFLICT", Const, 16}, + {"DT_MIPS_CONFLICTNO", Const, 16}, + {"DT_MIPS_CXX_FLAGS", Const, 16}, + {"DT_MIPS_DELTA_CLASS", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM", Const, 16}, + {"DT_MIPS_DELTA_CLASSSYM_NO", Const, 16}, + 
{"DT_MIPS_DELTA_CLASS_NO", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE", Const, 16}, + {"DT_MIPS_DELTA_INSTANCE_NO", Const, 16}, + {"DT_MIPS_DELTA_RELOC", Const, 16}, + {"DT_MIPS_DELTA_RELOC_NO", Const, 16}, + {"DT_MIPS_DELTA_SYM", Const, 16}, + {"DT_MIPS_DELTA_SYM_NO", Const, 16}, + {"DT_MIPS_DYNSTR_ALIGN", Const, 16}, + {"DT_MIPS_FLAGS", Const, 16}, + {"DT_MIPS_GOTSYM", Const, 16}, + {"DT_MIPS_GP_VALUE", Const, 16}, + {"DT_MIPS_HIDDEN_GOTIDX", Const, 16}, + {"DT_MIPS_HIPAGENO", Const, 16}, + {"DT_MIPS_ICHECKSUM", Const, 16}, + {"DT_MIPS_INTERFACE", Const, 16}, + {"DT_MIPS_INTERFACE_SIZE", Const, 16}, + {"DT_MIPS_IVERSION", Const, 16}, + {"DT_MIPS_LIBLIST", Const, 16}, + {"DT_MIPS_LIBLISTNO", Const, 16}, + {"DT_MIPS_LOCALPAGE_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTIDX", Const, 16}, + {"DT_MIPS_LOCAL_GOTNO", Const, 16}, + {"DT_MIPS_MSYM", Const, 16}, + {"DT_MIPS_OPTIONS", Const, 16}, + {"DT_MIPS_PERF_SUFFIX", Const, 16}, + {"DT_MIPS_PIXIE_INIT", Const, 16}, + {"DT_MIPS_PLTGOT", Const, 16}, + {"DT_MIPS_PROTECTED_GOTIDX", Const, 16}, + {"DT_MIPS_RLD_MAP", Const, 16}, + {"DT_MIPS_RLD_MAP_REL", Const, 16}, + {"DT_MIPS_RLD_TEXT_RESOLVE_ADDR", Const, 16}, + {"DT_MIPS_RLD_VERSION", Const, 16}, + {"DT_MIPS_RWPLT", Const, 16}, + {"DT_MIPS_SYMBOL_LIB", Const, 16}, + {"DT_MIPS_SYMTABNO", Const, 16}, + {"DT_MIPS_TIME_STAMP", Const, 16}, + {"DT_MIPS_UNREFEXTNO", Const, 16}, + {"DT_MOVEENT", Const, 16}, + {"DT_MOVESZ", Const, 16}, + {"DT_MOVETAB", Const, 16}, + {"DT_NEEDED", Const, 0}, + {"DT_NULL", Const, 0}, + {"DT_PLTGOT", Const, 0}, + {"DT_PLTPAD", Const, 16}, + {"DT_PLTPADSZ", Const, 16}, + {"DT_PLTREL", Const, 0}, + {"DT_PLTRELSZ", Const, 0}, + {"DT_POSFLAG_1", Const, 16}, + {"DT_PPC64_GLINK", Const, 16}, + {"DT_PPC64_OPD", Const, 16}, + {"DT_PPC64_OPDSZ", Const, 16}, + {"DT_PPC64_OPT", Const, 16}, + {"DT_PPC_GOT", Const, 16}, + {"DT_PPC_OPT", Const, 16}, + {"DT_PREINIT_ARRAY", Const, 0}, + {"DT_PREINIT_ARRAYSZ", Const, 0}, + {"DT_REL", Const, 0}, + {"DT_RELA", Const, 0}, + {"DT_RELACOUNT", Const, 16}, + {"DT_RELAENT", Const, 0}, + {"DT_RELASZ", Const, 0}, + {"DT_RELCOUNT", Const, 16}, + {"DT_RELENT", Const, 0}, + {"DT_RELSZ", Const, 0}, + {"DT_RPATH", Const, 0}, + {"DT_RUNPATH", Const, 0}, + {"DT_SONAME", Const, 0}, + {"DT_SPARC_REGISTER", Const, 16}, + {"DT_STRSZ", Const, 0}, + {"DT_STRTAB", Const, 0}, + {"DT_SYMBOLIC", Const, 0}, + {"DT_SYMENT", Const, 0}, + {"DT_SYMINENT", Const, 16}, + {"DT_SYMINFO", Const, 16}, + {"DT_SYMINSZ", Const, 16}, + {"DT_SYMTAB", Const, 0}, + {"DT_SYMTAB_SHNDX", Const, 16}, + {"DT_TEXTREL", Const, 0}, + {"DT_TLSDESC_GOT", Const, 16}, + {"DT_TLSDESC_PLT", Const, 16}, + {"DT_USED", Const, 16}, + {"DT_VALRNGHI", Const, 16}, + {"DT_VALRNGLO", Const, 16}, + {"DT_VERDEF", Const, 16}, + {"DT_VERDEFNUM", Const, 16}, + {"DT_VERNEED", Const, 0}, + {"DT_VERNEEDNUM", Const, 0}, + {"DT_VERSYM", Const, 0}, + {"Data", Type, 0}, + {"Dyn32", Type, 0}, + {"Dyn32.Tag", Field, 0}, + {"Dyn32.Val", Field, 0}, + {"Dyn64", Type, 0}, + {"Dyn64.Tag", Field, 0}, + {"Dyn64.Val", Field, 0}, + {"DynFlag", Type, 0}, + {"DynFlag1", Type, 21}, + {"DynTag", Type, 0}, + {"EI_ABIVERSION", Const, 0}, + {"EI_CLASS", Const, 0}, + {"EI_DATA", Const, 0}, + {"EI_NIDENT", Const, 0}, + {"EI_OSABI", Const, 0}, + {"EI_PAD", Const, 0}, + {"EI_VERSION", Const, 0}, + {"ELFCLASS32", Const, 0}, + {"ELFCLASS64", Const, 0}, + {"ELFCLASSNONE", Const, 0}, + {"ELFDATA2LSB", Const, 0}, + {"ELFDATA2MSB", Const, 0}, + {"ELFDATANONE", Const, 0}, + {"ELFMAG", Const, 0}, + {"ELFOSABI_86OPEN", Const, 0}, + {"ELFOSABI_AIX", Const, 
0}, + {"ELFOSABI_ARM", Const, 0}, + {"ELFOSABI_AROS", Const, 11}, + {"ELFOSABI_CLOUDABI", Const, 11}, + {"ELFOSABI_FENIXOS", Const, 11}, + {"ELFOSABI_FREEBSD", Const, 0}, + {"ELFOSABI_HPUX", Const, 0}, + {"ELFOSABI_HURD", Const, 0}, + {"ELFOSABI_IRIX", Const, 0}, + {"ELFOSABI_LINUX", Const, 0}, + {"ELFOSABI_MODESTO", Const, 0}, + {"ELFOSABI_NETBSD", Const, 0}, + {"ELFOSABI_NONE", Const, 0}, + {"ELFOSABI_NSK", Const, 0}, + {"ELFOSABI_OPENBSD", Const, 0}, + {"ELFOSABI_OPENVMS", Const, 0}, + {"ELFOSABI_SOLARIS", Const, 0}, + {"ELFOSABI_STANDALONE", Const, 0}, + {"ELFOSABI_TRU64", Const, 0}, + {"EM_386", Const, 0}, + {"EM_486", Const, 0}, + {"EM_56800EX", Const, 11}, + {"EM_68HC05", Const, 11}, + {"EM_68HC08", Const, 11}, + {"EM_68HC11", Const, 11}, + {"EM_68HC12", Const, 0}, + {"EM_68HC16", Const, 11}, + {"EM_68K", Const, 0}, + {"EM_78KOR", Const, 11}, + {"EM_8051", Const, 11}, + {"EM_860", Const, 0}, + {"EM_88K", Const, 0}, + {"EM_960", Const, 0}, + {"EM_AARCH64", Const, 4}, + {"EM_ALPHA", Const, 0}, + {"EM_ALPHA_STD", Const, 0}, + {"EM_ALTERA_NIOS2", Const, 11}, + {"EM_AMDGPU", Const, 11}, + {"EM_ARC", Const, 0}, + {"EM_ARCA", Const, 11}, + {"EM_ARC_COMPACT", Const, 11}, + {"EM_ARC_COMPACT2", Const, 11}, + {"EM_ARM", Const, 0}, + {"EM_AVR", Const, 11}, + {"EM_AVR32", Const, 11}, + {"EM_BA1", Const, 11}, + {"EM_BA2", Const, 11}, + {"EM_BLACKFIN", Const, 11}, + {"EM_BPF", Const, 11}, + {"EM_C166", Const, 11}, + {"EM_CDP", Const, 11}, + {"EM_CE", Const, 11}, + {"EM_CLOUDSHIELD", Const, 11}, + {"EM_COGE", Const, 11}, + {"EM_COLDFIRE", Const, 0}, + {"EM_COOL", Const, 11}, + {"EM_COREA_1ST", Const, 11}, + {"EM_COREA_2ND", Const, 11}, + {"EM_CR", Const, 11}, + {"EM_CR16", Const, 11}, + {"EM_CRAYNV2", Const, 11}, + {"EM_CRIS", Const, 11}, + {"EM_CRX", Const, 11}, + {"EM_CSR_KALIMBA", Const, 11}, + {"EM_CUDA", Const, 11}, + {"EM_CYPRESS_M8C", Const, 11}, + {"EM_D10V", Const, 11}, + {"EM_D30V", Const, 11}, + {"EM_DSP24", Const, 11}, + {"EM_DSPIC30F", Const, 11}, + {"EM_DXP", Const, 11}, + {"EM_ECOG1", Const, 11}, + {"EM_ECOG16", Const, 11}, + {"EM_ECOG1X", Const, 11}, + {"EM_ECOG2", Const, 11}, + {"EM_ETPU", Const, 11}, + {"EM_EXCESS", Const, 11}, + {"EM_F2MC16", Const, 11}, + {"EM_FIREPATH", Const, 11}, + {"EM_FR20", Const, 0}, + {"EM_FR30", Const, 11}, + {"EM_FT32", Const, 11}, + {"EM_FX66", Const, 11}, + {"EM_H8S", Const, 0}, + {"EM_H8_300", Const, 0}, + {"EM_H8_300H", Const, 0}, + {"EM_H8_500", Const, 0}, + {"EM_HUANY", Const, 11}, + {"EM_IA_64", Const, 0}, + {"EM_INTEL205", Const, 11}, + {"EM_INTEL206", Const, 11}, + {"EM_INTEL207", Const, 11}, + {"EM_INTEL208", Const, 11}, + {"EM_INTEL209", Const, 11}, + {"EM_IP2K", Const, 11}, + {"EM_JAVELIN", Const, 11}, + {"EM_K10M", Const, 11}, + {"EM_KM32", Const, 11}, + {"EM_KMX16", Const, 11}, + {"EM_KMX32", Const, 11}, + {"EM_KMX8", Const, 11}, + {"EM_KVARC", Const, 11}, + {"EM_L10M", Const, 11}, + {"EM_LANAI", Const, 11}, + {"EM_LATTICEMICO32", Const, 11}, + {"EM_LOONGARCH", Const, 19}, + {"EM_M16C", Const, 11}, + {"EM_M32", Const, 0}, + {"EM_M32C", Const, 11}, + {"EM_M32R", Const, 11}, + {"EM_MANIK", Const, 11}, + {"EM_MAX", Const, 11}, + {"EM_MAXQ30", Const, 11}, + {"EM_MCHP_PIC", Const, 11}, + {"EM_MCST_ELBRUS", Const, 11}, + {"EM_ME16", Const, 0}, + {"EM_METAG", Const, 11}, + {"EM_MICROBLAZE", Const, 11}, + {"EM_MIPS", Const, 0}, + {"EM_MIPS_RS3_LE", Const, 0}, + {"EM_MIPS_RS4_BE", Const, 0}, + {"EM_MIPS_X", Const, 0}, + {"EM_MMA", Const, 0}, + {"EM_MMDSP_PLUS", Const, 11}, + {"EM_MMIX", Const, 11}, + {"EM_MN10200", Const, 11}, + {"EM_MN10300", 
Const, 11}, + {"EM_MOXIE", Const, 11}, + {"EM_MSP430", Const, 11}, + {"EM_NCPU", Const, 0}, + {"EM_NDR1", Const, 0}, + {"EM_NDS32", Const, 11}, + {"EM_NONE", Const, 0}, + {"EM_NORC", Const, 11}, + {"EM_NS32K", Const, 11}, + {"EM_OPEN8", Const, 11}, + {"EM_OPENRISC", Const, 11}, + {"EM_PARISC", Const, 0}, + {"EM_PCP", Const, 0}, + {"EM_PDP10", Const, 11}, + {"EM_PDP11", Const, 11}, + {"EM_PDSP", Const, 11}, + {"EM_PJ", Const, 11}, + {"EM_PPC", Const, 0}, + {"EM_PPC64", Const, 0}, + {"EM_PRISM", Const, 11}, + {"EM_QDSP6", Const, 11}, + {"EM_R32C", Const, 11}, + {"EM_RCE", Const, 0}, + {"EM_RH32", Const, 0}, + {"EM_RISCV", Const, 11}, + {"EM_RL78", Const, 11}, + {"EM_RS08", Const, 11}, + {"EM_RX", Const, 11}, + {"EM_S370", Const, 0}, + {"EM_S390", Const, 0}, + {"EM_SCORE7", Const, 11}, + {"EM_SEP", Const, 11}, + {"EM_SE_C17", Const, 11}, + {"EM_SE_C33", Const, 11}, + {"EM_SH", Const, 0}, + {"EM_SHARC", Const, 11}, + {"EM_SLE9X", Const, 11}, + {"EM_SNP1K", Const, 11}, + {"EM_SPARC", Const, 0}, + {"EM_SPARC32PLUS", Const, 0}, + {"EM_SPARCV9", Const, 0}, + {"EM_ST100", Const, 0}, + {"EM_ST19", Const, 11}, + {"EM_ST200", Const, 11}, + {"EM_ST7", Const, 11}, + {"EM_ST9PLUS", Const, 11}, + {"EM_STARCORE", Const, 0}, + {"EM_STM8", Const, 11}, + {"EM_STXP7X", Const, 11}, + {"EM_SVX", Const, 11}, + {"EM_TILE64", Const, 11}, + {"EM_TILEGX", Const, 11}, + {"EM_TILEPRO", Const, 11}, + {"EM_TINYJ", Const, 0}, + {"EM_TI_ARP32", Const, 11}, + {"EM_TI_C2000", Const, 11}, + {"EM_TI_C5500", Const, 11}, + {"EM_TI_C6000", Const, 11}, + {"EM_TI_PRU", Const, 11}, + {"EM_TMM_GPP", Const, 11}, + {"EM_TPC", Const, 11}, + {"EM_TRICORE", Const, 0}, + {"EM_TRIMEDIA", Const, 11}, + {"EM_TSK3000", Const, 11}, + {"EM_UNICORE", Const, 11}, + {"EM_V800", Const, 0}, + {"EM_V850", Const, 11}, + {"EM_VAX", Const, 11}, + {"EM_VIDEOCORE", Const, 11}, + {"EM_VIDEOCORE3", Const, 11}, + {"EM_VIDEOCORE5", Const, 11}, + {"EM_VISIUM", Const, 11}, + {"EM_VPP500", Const, 0}, + {"EM_X86_64", Const, 0}, + {"EM_XCORE", Const, 11}, + {"EM_XGATE", Const, 11}, + {"EM_XIMO16", Const, 11}, + {"EM_XTENSA", Const, 11}, + {"EM_Z80", Const, 11}, + {"EM_ZSP", Const, 11}, + {"ET_CORE", Const, 0}, + {"ET_DYN", Const, 0}, + {"ET_EXEC", Const, 0}, + {"ET_HIOS", Const, 0}, + {"ET_HIPROC", Const, 0}, + {"ET_LOOS", Const, 0}, + {"ET_LOPROC", Const, 0}, + {"ET_NONE", Const, 0}, + {"ET_REL", Const, 0}, + {"EV_CURRENT", Const, 0}, + {"EV_NONE", Const, 0}, + {"ErrNoSymbols", Var, 4}, + {"File", Type, 0}, + {"File.FileHeader", Field, 0}, + {"File.Progs", Field, 0}, + {"File.Sections", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.ABIVersion", Field, 0}, + {"FileHeader.ByteOrder", Field, 0}, + {"FileHeader.Class", Field, 0}, + {"FileHeader.Data", Field, 0}, + {"FileHeader.Entry", Field, 1}, + {"FileHeader.Machine", Field, 0}, + {"FileHeader.OSABI", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FileHeader.Version", Field, 0}, + {"FormatError", Type, 0}, + {"Header32", Type, 0}, + {"Header32.Ehsize", Field, 0}, + {"Header32.Entry", Field, 0}, + {"Header32.Flags", Field, 0}, + {"Header32.Ident", Field, 0}, + {"Header32.Machine", Field, 0}, + {"Header32.Phentsize", Field, 0}, + {"Header32.Phnum", Field, 0}, + {"Header32.Phoff", Field, 0}, + {"Header32.Shentsize", Field, 0}, + {"Header32.Shnum", Field, 0}, + {"Header32.Shoff", Field, 0}, + {"Header32.Shstrndx", Field, 0}, + {"Header32.Type", Field, 0}, + {"Header32.Version", Field, 0}, + {"Header64", Type, 0}, + {"Header64.Ehsize", Field, 0}, + {"Header64.Entry", Field, 0}, + {"Header64.Flags", Field, 0}, + 
{"Header64.Ident", Field, 0}, + {"Header64.Machine", Field, 0}, + {"Header64.Phentsize", Field, 0}, + {"Header64.Phnum", Field, 0}, + {"Header64.Phoff", Field, 0}, + {"Header64.Shentsize", Field, 0}, + {"Header64.Shnum", Field, 0}, + {"Header64.Shoff", Field, 0}, + {"Header64.Shstrndx", Field, 0}, + {"Header64.Type", Field, 0}, + {"Header64.Version", Field, 0}, + {"ImportedSymbol", Type, 0}, + {"ImportedSymbol.Library", Field, 0}, + {"ImportedSymbol.Name", Field, 0}, + {"ImportedSymbol.Version", Field, 0}, + {"Machine", Type, 0}, + {"NT_FPREGSET", Const, 0}, + {"NT_PRPSINFO", Const, 0}, + {"NT_PRSTATUS", Const, 0}, + {"NType", Type, 0}, + {"NewFile", Func, 0}, + {"OSABI", Type, 0}, + {"Open", Func, 0}, + {"PF_MASKOS", Const, 0}, + {"PF_MASKPROC", Const, 0}, + {"PF_R", Const, 0}, + {"PF_W", Const, 0}, + {"PF_X", Const, 0}, + {"PT_AARCH64_ARCHEXT", Const, 16}, + {"PT_AARCH64_UNWIND", Const, 16}, + {"PT_ARM_ARCHEXT", Const, 16}, + {"PT_ARM_EXIDX", Const, 16}, + {"PT_DYNAMIC", Const, 0}, + {"PT_GNU_EH_FRAME", Const, 16}, + {"PT_GNU_MBIND_HI", Const, 16}, + {"PT_GNU_MBIND_LO", Const, 16}, + {"PT_GNU_PROPERTY", Const, 16}, + {"PT_GNU_RELRO", Const, 16}, + {"PT_GNU_STACK", Const, 16}, + {"PT_HIOS", Const, 0}, + {"PT_HIPROC", Const, 0}, + {"PT_INTERP", Const, 0}, + {"PT_LOAD", Const, 0}, + {"PT_LOOS", Const, 0}, + {"PT_LOPROC", Const, 0}, + {"PT_MIPS_ABIFLAGS", Const, 16}, + {"PT_MIPS_OPTIONS", Const, 16}, + {"PT_MIPS_REGINFO", Const, 16}, + {"PT_MIPS_RTPROC", Const, 16}, + {"PT_NOTE", Const, 0}, + {"PT_NULL", Const, 0}, + {"PT_OPENBSD_BOOTDATA", Const, 16}, + {"PT_OPENBSD_RANDOMIZE", Const, 16}, + {"PT_OPENBSD_WXNEEDED", Const, 16}, + {"PT_PAX_FLAGS", Const, 16}, + {"PT_PHDR", Const, 0}, + {"PT_S390_PGSTE", Const, 16}, + {"PT_SHLIB", Const, 0}, + {"PT_SUNWSTACK", Const, 16}, + {"PT_SUNW_EH_FRAME", Const, 16}, + {"PT_TLS", Const, 0}, + {"Prog", Type, 0}, + {"Prog.ProgHeader", Field, 0}, + {"Prog.ReaderAt", Field, 0}, + {"Prog32", Type, 0}, + {"Prog32.Align", Field, 0}, + {"Prog32.Filesz", Field, 0}, + {"Prog32.Flags", Field, 0}, + {"Prog32.Memsz", Field, 0}, + {"Prog32.Off", Field, 0}, + {"Prog32.Paddr", Field, 0}, + {"Prog32.Type", Field, 0}, + {"Prog32.Vaddr", Field, 0}, + {"Prog64", Type, 0}, + {"Prog64.Align", Field, 0}, + {"Prog64.Filesz", Field, 0}, + {"Prog64.Flags", Field, 0}, + {"Prog64.Memsz", Field, 0}, + {"Prog64.Off", Field, 0}, + {"Prog64.Paddr", Field, 0}, + {"Prog64.Type", Field, 0}, + {"Prog64.Vaddr", Field, 0}, + {"ProgFlag", Type, 0}, + {"ProgHeader", Type, 0}, + {"ProgHeader.Align", Field, 0}, + {"ProgHeader.Filesz", Field, 0}, + {"ProgHeader.Flags", Field, 0}, + {"ProgHeader.Memsz", Field, 0}, + {"ProgHeader.Off", Field, 0}, + {"ProgHeader.Paddr", Field, 0}, + {"ProgHeader.Type", Field, 0}, + {"ProgHeader.Vaddr", Field, 0}, + {"ProgType", Type, 0}, + {"R_386", Type, 0}, + {"R_386_16", Const, 10}, + {"R_386_32", Const, 0}, + {"R_386_32PLT", Const, 10}, + {"R_386_8", Const, 10}, + {"R_386_COPY", Const, 0}, + {"R_386_GLOB_DAT", Const, 0}, + {"R_386_GOT32", Const, 0}, + {"R_386_GOT32X", Const, 10}, + {"R_386_GOTOFF", Const, 0}, + {"R_386_GOTPC", Const, 0}, + {"R_386_IRELATIVE", Const, 10}, + {"R_386_JMP_SLOT", Const, 0}, + {"R_386_NONE", Const, 0}, + {"R_386_PC16", Const, 10}, + {"R_386_PC32", Const, 0}, + {"R_386_PC8", Const, 10}, + {"R_386_PLT32", Const, 0}, + {"R_386_RELATIVE", Const, 0}, + {"R_386_SIZE32", Const, 10}, + {"R_386_TLS_DESC", Const, 10}, + {"R_386_TLS_DESC_CALL", Const, 10}, + {"R_386_TLS_DTPMOD32", Const, 0}, + {"R_386_TLS_DTPOFF32", Const, 0}, + {"R_386_TLS_GD", 
Const, 0}, + {"R_386_TLS_GD_32", Const, 0}, + {"R_386_TLS_GD_CALL", Const, 0}, + {"R_386_TLS_GD_POP", Const, 0}, + {"R_386_TLS_GD_PUSH", Const, 0}, + {"R_386_TLS_GOTDESC", Const, 10}, + {"R_386_TLS_GOTIE", Const, 0}, + {"R_386_TLS_IE", Const, 0}, + {"R_386_TLS_IE_32", Const, 0}, + {"R_386_TLS_LDM", Const, 0}, + {"R_386_TLS_LDM_32", Const, 0}, + {"R_386_TLS_LDM_CALL", Const, 0}, + {"R_386_TLS_LDM_POP", Const, 0}, + {"R_386_TLS_LDM_PUSH", Const, 0}, + {"R_386_TLS_LDO_32", Const, 0}, + {"R_386_TLS_LE", Const, 0}, + {"R_386_TLS_LE_32", Const, 0}, + {"R_386_TLS_TPOFF", Const, 0}, + {"R_386_TLS_TPOFF32", Const, 0}, + {"R_390", Type, 7}, + {"R_390_12", Const, 7}, + {"R_390_16", Const, 7}, + {"R_390_20", Const, 7}, + {"R_390_32", Const, 7}, + {"R_390_64", Const, 7}, + {"R_390_8", Const, 7}, + {"R_390_COPY", Const, 7}, + {"R_390_GLOB_DAT", Const, 7}, + {"R_390_GOT12", Const, 7}, + {"R_390_GOT16", Const, 7}, + {"R_390_GOT20", Const, 7}, + {"R_390_GOT32", Const, 7}, + {"R_390_GOT64", Const, 7}, + {"R_390_GOTENT", Const, 7}, + {"R_390_GOTOFF", Const, 7}, + {"R_390_GOTOFF16", Const, 7}, + {"R_390_GOTOFF64", Const, 7}, + {"R_390_GOTPC", Const, 7}, + {"R_390_GOTPCDBL", Const, 7}, + {"R_390_GOTPLT12", Const, 7}, + {"R_390_GOTPLT16", Const, 7}, + {"R_390_GOTPLT20", Const, 7}, + {"R_390_GOTPLT32", Const, 7}, + {"R_390_GOTPLT64", Const, 7}, + {"R_390_GOTPLTENT", Const, 7}, + {"R_390_GOTPLTOFF16", Const, 7}, + {"R_390_GOTPLTOFF32", Const, 7}, + {"R_390_GOTPLTOFF64", Const, 7}, + {"R_390_JMP_SLOT", Const, 7}, + {"R_390_NONE", Const, 7}, + {"R_390_PC16", Const, 7}, + {"R_390_PC16DBL", Const, 7}, + {"R_390_PC32", Const, 7}, + {"R_390_PC32DBL", Const, 7}, + {"R_390_PC64", Const, 7}, + {"R_390_PLT16DBL", Const, 7}, + {"R_390_PLT32", Const, 7}, + {"R_390_PLT32DBL", Const, 7}, + {"R_390_PLT64", Const, 7}, + {"R_390_RELATIVE", Const, 7}, + {"R_390_TLS_DTPMOD", Const, 7}, + {"R_390_TLS_DTPOFF", Const, 7}, + {"R_390_TLS_GD32", Const, 7}, + {"R_390_TLS_GD64", Const, 7}, + {"R_390_TLS_GDCALL", Const, 7}, + {"R_390_TLS_GOTIE12", Const, 7}, + {"R_390_TLS_GOTIE20", Const, 7}, + {"R_390_TLS_GOTIE32", Const, 7}, + {"R_390_TLS_GOTIE64", Const, 7}, + {"R_390_TLS_IE32", Const, 7}, + {"R_390_TLS_IE64", Const, 7}, + {"R_390_TLS_IEENT", Const, 7}, + {"R_390_TLS_LDCALL", Const, 7}, + {"R_390_TLS_LDM32", Const, 7}, + {"R_390_TLS_LDM64", Const, 7}, + {"R_390_TLS_LDO32", Const, 7}, + {"R_390_TLS_LDO64", Const, 7}, + {"R_390_TLS_LE32", Const, 7}, + {"R_390_TLS_LE64", Const, 7}, + {"R_390_TLS_LOAD", Const, 7}, + {"R_390_TLS_TPOFF", Const, 7}, + {"R_AARCH64", Type, 4}, + {"R_AARCH64_ABS16", Const, 4}, + {"R_AARCH64_ABS32", Const, 4}, + {"R_AARCH64_ABS64", Const, 4}, + {"R_AARCH64_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_ADR_PREL_PG_HI21_NC", Const, 4}, + {"R_AARCH64_CALL26", Const, 4}, + {"R_AARCH64_CONDBR19", Const, 4}, + {"R_AARCH64_COPY", Const, 4}, + {"R_AARCH64_GLOB_DAT", Const, 4}, + {"R_AARCH64_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_IRELATIVE", Const, 4}, + {"R_AARCH64_JUMP26", Const, 4}, + {"R_AARCH64_JUMP_SLOT", Const, 4}, + {"R_AARCH64_LD64_GOTOFF_LO15", Const, 10}, + {"R_AARCH64_LD64_GOTPAGE_LO15", Const, 10}, + {"R_AARCH64_LD64_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_LDST8_ABS_LO12_NC", Const, 4}, + 
{"R_AARCH64_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_MOVW_SABS_G1", Const, 4}, + {"R_AARCH64_MOVW_SABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_MOVW_UABS_G1_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2", Const, 4}, + {"R_AARCH64_MOVW_UABS_G2_NC", Const, 4}, + {"R_AARCH64_MOVW_UABS_G3", Const, 4}, + {"R_AARCH64_NONE", Const, 4}, + {"R_AARCH64_NULL", Const, 4}, + {"R_AARCH64_P32_ABS16", Const, 4}, + {"R_AARCH64_P32_ABS32", Const, 4}, + {"R_AARCH64_P32_ADD_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_ADR_GOT_PAGE", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_LO21", Const, 4}, + {"R_AARCH64_P32_ADR_PREL_PG_HI21", Const, 4}, + {"R_AARCH64_P32_CALL26", Const, 4}, + {"R_AARCH64_P32_CONDBR19", Const, 4}, + {"R_AARCH64_P32_COPY", Const, 4}, + {"R_AARCH64_P32_GLOB_DAT", Const, 4}, + {"R_AARCH64_P32_GOT_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_IRELATIVE", Const, 4}, + {"R_AARCH64_P32_JUMP26", Const, 4}, + {"R_AARCH64_P32_JUMP_SLOT", Const, 4}, + {"R_AARCH64_P32_LD32_GOT_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST128_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST16_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST32_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST64_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LDST8_ABS_LO12_NC", Const, 4}, + {"R_AARCH64_P32_LD_PREL_LO19", Const, 4}, + {"R_AARCH64_P32_MOVW_SABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G0_NC", Const, 4}, + {"R_AARCH64_P32_MOVW_UABS_G1", Const, 4}, + {"R_AARCH64_P32_PREL16", Const, 4}, + {"R_AARCH64_P32_PREL32", Const, 4}, + {"R_AARCH64_P32_RELATIVE", Const, 4}, + {"R_AARCH64_P32_TLSDESC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_P32_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD32_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_P32_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_P32_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_P32_TLS_DTPMOD", Const, 4}, + {"R_AARCH64_P32_TLS_DTPREL", Const, 4}, + {"R_AARCH64_P32_TLS_TPREL", Const, 4}, + {"R_AARCH64_P32_TSTBR14", Const, 4}, + {"R_AARCH64_PREL16", Const, 4}, + {"R_AARCH64_PREL32", Const, 4}, + {"R_AARCH64_PREL64", Const, 4}, + {"R_AARCH64_RELATIVE", Const, 4}, + {"R_AARCH64_TLSDESC", Const, 4}, + {"R_AARCH64_TLSDESC_ADD", Const, 4}, + {"R_AARCH64_TLSDESC_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSDESC_ADR_PREL21", Const, 4}, + {"R_AARCH64_TLSDESC_CALL", Const, 4}, + {"R_AARCH64_TLSDESC_LD64_LO12_NC", Const, 4}, + {"R_AARCH64_TLSDESC_LDR", Const, 4}, + {"R_AARCH64_TLSDESC_LD_PREL19", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G0_NC", Const, 4}, + {"R_AARCH64_TLSDESC_OFF_G1", Const, 4}, + {"R_AARCH64_TLSGD_ADD_LO12_NC", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PAGE21", Const, 4}, + {"R_AARCH64_TLSGD_ADR_PREL21", Const, 10}, + 
{"R_AARCH64_TLSGD_MOVW_G0_NC", Const, 10}, + {"R_AARCH64_TLSGD_MOVW_G1", Const, 10}, + {"R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21", Const, 4}, + {"R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSIE_LD_GOTTPREL_PREL19", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSIE_MOVW_GOTTPREL_G1", Const, 4}, + {"R_AARCH64_TLSLD_ADR_PAGE21", Const, 10}, + {"R_AARCH64_TLSLD_ADR_PREL21", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_ADD_TPREL_HI12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12", Const, 4}, + {"R_AARCH64_TLSLE_ADD_TPREL_LO12_NC", Const, 4}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12", Const, 10}, + {"R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC", Const, 10}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G0_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G1_NC", Const, 4}, + {"R_AARCH64_TLSLE_MOVW_TPREL_G2", Const, 4}, + {"R_AARCH64_TLS_DTPMOD64", Const, 4}, + {"R_AARCH64_TLS_DTPREL64", Const, 4}, + {"R_AARCH64_TLS_TPREL64", Const, 4}, + {"R_AARCH64_TSTBR14", Const, 4}, + {"R_ALPHA", Type, 0}, + {"R_ALPHA_BRADDR", Const, 0}, + {"R_ALPHA_COPY", Const, 0}, + {"R_ALPHA_GLOB_DAT", Const, 0}, + {"R_ALPHA_GPDISP", Const, 0}, + {"R_ALPHA_GPREL32", Const, 0}, + {"R_ALPHA_GPRELHIGH", Const, 0}, + {"R_ALPHA_GPRELLOW", Const, 0}, + {"R_ALPHA_GPVALUE", Const, 0}, + {"R_ALPHA_HINT", Const, 0}, + {"R_ALPHA_IMMED_BR_HI32", Const, 0}, + {"R_ALPHA_IMMED_GP_16", Const, 0}, + {"R_ALPHA_IMMED_GP_HI32", Const, 0}, + {"R_ALPHA_IMMED_LO32", Const, 0}, + {"R_ALPHA_IMMED_SCN_HI32", Const, 0}, + {"R_ALPHA_JMP_SLOT", Const, 0}, + {"R_ALPHA_LITERAL", Const, 0}, + {"R_ALPHA_LITUSE", Const, 0}, + {"R_ALPHA_NONE", Const, 0}, + {"R_ALPHA_OP_PRSHIFT", Const, 0}, + {"R_ALPHA_OP_PSUB", Const, 0}, + {"R_ALPHA_OP_PUSH", Const, 0}, + {"R_ALPHA_OP_STORE", Const, 0}, + {"R_ALPHA_REFLONG", Const, 0}, + {"R_ALPHA_REFQUAD", Const, 0}, + {"R_ALPHA_RELATIVE", Const, 0}, + {"R_ALPHA_SREL16", Const, 0}, + {"R_ALPHA_SREL32", Const, 0}, + {"R_ALPHA_SREL64", Const, 0}, + {"R_ARM", Type, 0}, + {"R_ARM_ABS12", Const, 0}, + {"R_ARM_ABS16", Const, 0}, + {"R_ARM_ABS32", Const, 0}, + {"R_ARM_ABS32_NOI", Const, 10}, + {"R_ARM_ABS8", Const, 0}, + {"R_ARM_ALU_PCREL_15_8", Const, 10}, + {"R_ARM_ALU_PCREL_23_15", Const, 10}, + {"R_ARM_ALU_PCREL_7_0", Const, 10}, + {"R_ARM_ALU_PC_G0", Const, 10}, + {"R_ARM_ALU_PC_G0_NC", Const, 10}, + {"R_ARM_ALU_PC_G1", Const, 10}, + {"R_ARM_ALU_PC_G1_NC", Const, 10}, + {"R_ARM_ALU_PC_G2", Const, 10}, + {"R_ARM_ALU_SBREL_19_12_NC", Const, 10}, + {"R_ARM_ALU_SBREL_27_20_CK", Const, 10}, + {"R_ARM_ALU_SB_G0", Const, 10}, + {"R_ARM_ALU_SB_G0_NC", Const, 10}, + {"R_ARM_ALU_SB_G1", Const, 10}, + {"R_ARM_ALU_SB_G1_NC", Const, 10}, + {"R_ARM_ALU_SB_G2", Const, 10}, + {"R_ARM_AMP_VCALL9", Const, 0}, + {"R_ARM_BASE_ABS", Const, 10}, + {"R_ARM_CALL", Const, 10}, + {"R_ARM_COPY", Const, 0}, + {"R_ARM_GLOB_DAT", Const, 0}, + {"R_ARM_GNU_VTENTRY", Const, 0}, + {"R_ARM_GNU_VTINHERIT", Const, 0}, + {"R_ARM_GOT32", Const, 0}, + {"R_ARM_GOTOFF", Const, 0}, + {"R_ARM_GOTOFF12", Const, 10}, + {"R_ARM_GOTPC", Const, 0}, + {"R_ARM_GOTRELAX", Const, 10}, + {"R_ARM_GOT_ABS", Const, 10}, + {"R_ARM_GOT_BREL12", Const, 10}, + {"R_ARM_GOT_PREL", Const, 10}, + {"R_ARM_IRELATIVE", Const, 10}, + {"R_ARM_JUMP24", Const, 10}, + {"R_ARM_JUMP_SLOT", Const, 0}, + {"R_ARM_LDC_PC_G0", Const, 10}, + {"R_ARM_LDC_PC_G1", Const, 10}, + 
{"R_ARM_LDC_PC_G2", Const, 10}, + {"R_ARM_LDC_SB_G0", Const, 10}, + {"R_ARM_LDC_SB_G1", Const, 10}, + {"R_ARM_LDC_SB_G2", Const, 10}, + {"R_ARM_LDRS_PC_G0", Const, 10}, + {"R_ARM_LDRS_PC_G1", Const, 10}, + {"R_ARM_LDRS_PC_G2", Const, 10}, + {"R_ARM_LDRS_SB_G0", Const, 10}, + {"R_ARM_LDRS_SB_G1", Const, 10}, + {"R_ARM_LDRS_SB_G2", Const, 10}, + {"R_ARM_LDR_PC_G1", Const, 10}, + {"R_ARM_LDR_PC_G2", Const, 10}, + {"R_ARM_LDR_SBREL_11_10_NC", Const, 10}, + {"R_ARM_LDR_SB_G0", Const, 10}, + {"R_ARM_LDR_SB_G1", Const, 10}, + {"R_ARM_LDR_SB_G2", Const, 10}, + {"R_ARM_ME_TOO", Const, 10}, + {"R_ARM_MOVT_ABS", Const, 10}, + {"R_ARM_MOVT_BREL", Const, 10}, + {"R_ARM_MOVT_PREL", Const, 10}, + {"R_ARM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_MOVW_BREL", Const, 10}, + {"R_ARM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_NONE", Const, 0}, + {"R_ARM_PC13", Const, 0}, + {"R_ARM_PC24", Const, 0}, + {"R_ARM_PLT32", Const, 0}, + {"R_ARM_PLT32_ABS", Const, 10}, + {"R_ARM_PREL31", Const, 10}, + {"R_ARM_PRIVATE_0", Const, 10}, + {"R_ARM_PRIVATE_1", Const, 10}, + {"R_ARM_PRIVATE_10", Const, 10}, + {"R_ARM_PRIVATE_11", Const, 10}, + {"R_ARM_PRIVATE_12", Const, 10}, + {"R_ARM_PRIVATE_13", Const, 10}, + {"R_ARM_PRIVATE_14", Const, 10}, + {"R_ARM_PRIVATE_15", Const, 10}, + {"R_ARM_PRIVATE_2", Const, 10}, + {"R_ARM_PRIVATE_3", Const, 10}, + {"R_ARM_PRIVATE_4", Const, 10}, + {"R_ARM_PRIVATE_5", Const, 10}, + {"R_ARM_PRIVATE_6", Const, 10}, + {"R_ARM_PRIVATE_7", Const, 10}, + {"R_ARM_PRIVATE_8", Const, 10}, + {"R_ARM_PRIVATE_9", Const, 10}, + {"R_ARM_RABS32", Const, 0}, + {"R_ARM_RBASE", Const, 0}, + {"R_ARM_REL32", Const, 0}, + {"R_ARM_REL32_NOI", Const, 10}, + {"R_ARM_RELATIVE", Const, 0}, + {"R_ARM_RPC24", Const, 0}, + {"R_ARM_RREL32", Const, 0}, + {"R_ARM_RSBREL32", Const, 0}, + {"R_ARM_RXPC25", Const, 10}, + {"R_ARM_SBREL31", Const, 10}, + {"R_ARM_SBREL32", Const, 0}, + {"R_ARM_SWI24", Const, 0}, + {"R_ARM_TARGET1", Const, 10}, + {"R_ARM_TARGET2", Const, 10}, + {"R_ARM_THM_ABS5", Const, 0}, + {"R_ARM_THM_ALU_ABS_G0_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G1_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G2_NC", Const, 10}, + {"R_ARM_THM_ALU_ABS_G3", Const, 10}, + {"R_ARM_THM_ALU_PREL_11_0", Const, 10}, + {"R_ARM_THM_GOT_BREL12", Const, 10}, + {"R_ARM_THM_JUMP11", Const, 10}, + {"R_ARM_THM_JUMP19", Const, 10}, + {"R_ARM_THM_JUMP24", Const, 10}, + {"R_ARM_THM_JUMP6", Const, 10}, + {"R_ARM_THM_JUMP8", Const, 10}, + {"R_ARM_THM_MOVT_ABS", Const, 10}, + {"R_ARM_THM_MOVT_BREL", Const, 10}, + {"R_ARM_THM_MOVT_PREL", Const, 10}, + {"R_ARM_THM_MOVW_ABS_NC", Const, 10}, + {"R_ARM_THM_MOVW_BREL", Const, 10}, + {"R_ARM_THM_MOVW_BREL_NC", Const, 10}, + {"R_ARM_THM_MOVW_PREL_NC", Const, 10}, + {"R_ARM_THM_PC12", Const, 10}, + {"R_ARM_THM_PC22", Const, 0}, + {"R_ARM_THM_PC8", Const, 0}, + {"R_ARM_THM_RPC22", Const, 0}, + {"R_ARM_THM_SWI8", Const, 0}, + {"R_ARM_THM_TLS_CALL", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ16", Const, 10}, + {"R_ARM_THM_TLS_DESCSEQ32", Const, 10}, + {"R_ARM_THM_XPC22", Const, 0}, + {"R_ARM_TLS_CALL", Const, 10}, + {"R_ARM_TLS_DESCSEQ", Const, 10}, + {"R_ARM_TLS_DTPMOD32", Const, 10}, + {"R_ARM_TLS_DTPOFF32", Const, 10}, + {"R_ARM_TLS_GD32", Const, 10}, + {"R_ARM_TLS_GOTDESC", Const, 10}, + {"R_ARM_TLS_IE12GP", Const, 10}, + {"R_ARM_TLS_IE32", Const, 10}, + {"R_ARM_TLS_LDM32", Const, 10}, + {"R_ARM_TLS_LDO12", Const, 10}, + {"R_ARM_TLS_LDO32", Const, 10}, + {"R_ARM_TLS_LE12", Const, 10}, + {"R_ARM_TLS_LE32", Const, 10}, + {"R_ARM_TLS_TPOFF32", Const, 10}, + {"R_ARM_V4BX", Const, 10}, + 
{"R_ARM_XPC25", Const, 0}, + {"R_INFO", Func, 0}, + {"R_INFO32", Func, 0}, + {"R_LARCH", Type, 19}, + {"R_LARCH_32", Const, 19}, + {"R_LARCH_32_PCREL", Const, 20}, + {"R_LARCH_64", Const, 19}, + {"R_LARCH_64_PCREL", Const, 22}, + {"R_LARCH_ABS64_HI12", Const, 20}, + {"R_LARCH_ABS64_LO20", Const, 20}, + {"R_LARCH_ABS_HI20", Const, 20}, + {"R_LARCH_ABS_LO12", Const, 20}, + {"R_LARCH_ADD16", Const, 19}, + {"R_LARCH_ADD24", Const, 19}, + {"R_LARCH_ADD32", Const, 19}, + {"R_LARCH_ADD6", Const, 22}, + {"R_LARCH_ADD64", Const, 19}, + {"R_LARCH_ADD8", Const, 19}, + {"R_LARCH_ADD_ULEB128", Const, 22}, + {"R_LARCH_ALIGN", Const, 22}, + {"R_LARCH_B16", Const, 20}, + {"R_LARCH_B21", Const, 20}, + {"R_LARCH_B26", Const, 20}, + {"R_LARCH_CFA", Const, 22}, + {"R_LARCH_COPY", Const, 19}, + {"R_LARCH_DELETE", Const, 22}, + {"R_LARCH_GNU_VTENTRY", Const, 20}, + {"R_LARCH_GNU_VTINHERIT", Const, 20}, + {"R_LARCH_GOT64_HI12", Const, 20}, + {"R_LARCH_GOT64_LO20", Const, 20}, + {"R_LARCH_GOT64_PC_HI12", Const, 20}, + {"R_LARCH_GOT64_PC_LO20", Const, 20}, + {"R_LARCH_GOT_HI20", Const, 20}, + {"R_LARCH_GOT_LO12", Const, 20}, + {"R_LARCH_GOT_PC_HI20", Const, 20}, + {"R_LARCH_GOT_PC_LO12", Const, 20}, + {"R_LARCH_IRELATIVE", Const, 19}, + {"R_LARCH_JUMP_SLOT", Const, 19}, + {"R_LARCH_MARK_LA", Const, 19}, + {"R_LARCH_MARK_PCREL", Const, 19}, + {"R_LARCH_NONE", Const, 19}, + {"R_LARCH_PCALA64_HI12", Const, 20}, + {"R_LARCH_PCALA64_LO20", Const, 20}, + {"R_LARCH_PCALA_HI20", Const, 20}, + {"R_LARCH_PCALA_LO12", Const, 20}, + {"R_LARCH_PCREL20_S2", Const, 22}, + {"R_LARCH_RELATIVE", Const, 19}, + {"R_LARCH_RELAX", Const, 20}, + {"R_LARCH_SOP_ADD", Const, 19}, + {"R_LARCH_SOP_AND", Const, 19}, + {"R_LARCH_SOP_ASSERT", Const, 19}, + {"R_LARCH_SOP_IF_ELSE", Const, 19}, + {"R_LARCH_SOP_NOT", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_10_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_0_5_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_12", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_16_S2", Const, 19}, + {"R_LARCH_SOP_POP_32_S_10_5", Const, 19}, + {"R_LARCH_SOP_POP_32_S_5_20", Const, 19}, + {"R_LARCH_SOP_POP_32_U", Const, 19}, + {"R_LARCH_SOP_POP_32_U_10_12", Const, 19}, + {"R_LARCH_SOP_PUSH_ABSOLUTE", Const, 19}, + {"R_LARCH_SOP_PUSH_DUP", Const, 19}, + {"R_LARCH_SOP_PUSH_GPREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_PLT_PCREL", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GD", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_GOT", Const, 19}, + {"R_LARCH_SOP_PUSH_TLS_TPREL", Const, 19}, + {"R_LARCH_SOP_SL", Const, 19}, + {"R_LARCH_SOP_SR", Const, 19}, + {"R_LARCH_SOP_SUB", Const, 19}, + {"R_LARCH_SUB16", Const, 19}, + {"R_LARCH_SUB24", Const, 19}, + {"R_LARCH_SUB32", Const, 19}, + {"R_LARCH_SUB6", Const, 22}, + {"R_LARCH_SUB64", Const, 19}, + {"R_LARCH_SUB8", Const, 19}, + {"R_LARCH_SUB_ULEB128", Const, 22}, + {"R_LARCH_TLS_DTPMOD32", Const, 19}, + {"R_LARCH_TLS_DTPMOD64", Const, 19}, + {"R_LARCH_TLS_DTPREL32", Const, 19}, + {"R_LARCH_TLS_DTPREL64", Const, 19}, + {"R_LARCH_TLS_GD_HI20", Const, 20}, + {"R_LARCH_TLS_GD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE64_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_LO20", Const, 20}, + {"R_LARCH_TLS_IE64_PC_HI12", Const, 20}, + {"R_LARCH_TLS_IE64_PC_LO20", Const, 20}, + {"R_LARCH_TLS_IE_HI20", Const, 20}, + {"R_LARCH_TLS_IE_LO12", Const, 20}, + {"R_LARCH_TLS_IE_PC_HI20", Const, 20}, + {"R_LARCH_TLS_IE_PC_LO12", Const, 20}, + {"R_LARCH_TLS_LD_HI20", Const, 20}, + {"R_LARCH_TLS_LD_PC_HI20", Const, 20}, + {"R_LARCH_TLS_LE64_HI12", 
Const, 20}, + {"R_LARCH_TLS_LE64_LO20", Const, 20}, + {"R_LARCH_TLS_LE_HI20", Const, 20}, + {"R_LARCH_TLS_LE_LO12", Const, 20}, + {"R_LARCH_TLS_TPREL32", Const, 19}, + {"R_LARCH_TLS_TPREL64", Const, 19}, + {"R_MIPS", Type, 6}, + {"R_MIPS_16", Const, 6}, + {"R_MIPS_26", Const, 6}, + {"R_MIPS_32", Const, 6}, + {"R_MIPS_64", Const, 6}, + {"R_MIPS_ADD_IMMEDIATE", Const, 6}, + {"R_MIPS_CALL16", Const, 6}, + {"R_MIPS_CALL_HI16", Const, 6}, + {"R_MIPS_CALL_LO16", Const, 6}, + {"R_MIPS_DELETE", Const, 6}, + {"R_MIPS_GOT16", Const, 6}, + {"R_MIPS_GOT_DISP", Const, 6}, + {"R_MIPS_GOT_HI16", Const, 6}, + {"R_MIPS_GOT_LO16", Const, 6}, + {"R_MIPS_GOT_OFST", Const, 6}, + {"R_MIPS_GOT_PAGE", Const, 6}, + {"R_MIPS_GPREL16", Const, 6}, + {"R_MIPS_GPREL32", Const, 6}, + {"R_MIPS_HI16", Const, 6}, + {"R_MIPS_HIGHER", Const, 6}, + {"R_MIPS_HIGHEST", Const, 6}, + {"R_MIPS_INSERT_A", Const, 6}, + {"R_MIPS_INSERT_B", Const, 6}, + {"R_MIPS_JALR", Const, 6}, + {"R_MIPS_LITERAL", Const, 6}, + {"R_MIPS_LO16", Const, 6}, + {"R_MIPS_NONE", Const, 6}, + {"R_MIPS_PC16", Const, 6}, + {"R_MIPS_PC32", Const, 22}, + {"R_MIPS_PJUMP", Const, 6}, + {"R_MIPS_REL16", Const, 6}, + {"R_MIPS_REL32", Const, 6}, + {"R_MIPS_RELGOT", Const, 6}, + {"R_MIPS_SCN_DISP", Const, 6}, + {"R_MIPS_SHIFT5", Const, 6}, + {"R_MIPS_SHIFT6", Const, 6}, + {"R_MIPS_SUB", Const, 6}, + {"R_MIPS_TLS_DTPMOD32", Const, 6}, + {"R_MIPS_TLS_DTPMOD64", Const, 6}, + {"R_MIPS_TLS_DTPREL32", Const, 6}, + {"R_MIPS_TLS_DTPREL64", Const, 6}, + {"R_MIPS_TLS_DTPREL_HI16", Const, 6}, + {"R_MIPS_TLS_DTPREL_LO16", Const, 6}, + {"R_MIPS_TLS_GD", Const, 6}, + {"R_MIPS_TLS_GOTTPREL", Const, 6}, + {"R_MIPS_TLS_LDM", Const, 6}, + {"R_MIPS_TLS_TPREL32", Const, 6}, + {"R_MIPS_TLS_TPREL64", Const, 6}, + {"R_MIPS_TLS_TPREL_HI16", Const, 6}, + {"R_MIPS_TLS_TPREL_LO16", Const, 6}, + {"R_PPC", Type, 0}, + {"R_PPC64", Type, 5}, + {"R_PPC64_ADDR14", Const, 5}, + {"R_PPC64_ADDR14_BRNTAKEN", Const, 5}, + {"R_PPC64_ADDR14_BRTAKEN", Const, 5}, + {"R_PPC64_ADDR16", Const, 5}, + {"R_PPC64_ADDR16_DS", Const, 5}, + {"R_PPC64_ADDR16_HA", Const, 5}, + {"R_PPC64_ADDR16_HI", Const, 5}, + {"R_PPC64_ADDR16_HIGH", Const, 10}, + {"R_PPC64_ADDR16_HIGHA", Const, 10}, + {"R_PPC64_ADDR16_HIGHER", Const, 5}, + {"R_PPC64_ADDR16_HIGHER34", Const, 20}, + {"R_PPC64_ADDR16_HIGHERA", Const, 5}, + {"R_PPC64_ADDR16_HIGHERA34", Const, 20}, + {"R_PPC64_ADDR16_HIGHEST", Const, 5}, + {"R_PPC64_ADDR16_HIGHEST34", Const, 20}, + {"R_PPC64_ADDR16_HIGHESTA", Const, 5}, + {"R_PPC64_ADDR16_HIGHESTA34", Const, 20}, + {"R_PPC64_ADDR16_LO", Const, 5}, + {"R_PPC64_ADDR16_LO_DS", Const, 5}, + {"R_PPC64_ADDR24", Const, 5}, + {"R_PPC64_ADDR32", Const, 5}, + {"R_PPC64_ADDR64", Const, 5}, + {"R_PPC64_ADDR64_LOCAL", Const, 10}, + {"R_PPC64_COPY", Const, 20}, + {"R_PPC64_D28", Const, 20}, + {"R_PPC64_D34", Const, 20}, + {"R_PPC64_D34_HA30", Const, 20}, + {"R_PPC64_D34_HI30", Const, 20}, + {"R_PPC64_D34_LO", Const, 20}, + {"R_PPC64_DTPMOD64", Const, 5}, + {"R_PPC64_DTPREL16", Const, 5}, + {"R_PPC64_DTPREL16_DS", Const, 5}, + {"R_PPC64_DTPREL16_HA", Const, 5}, + {"R_PPC64_DTPREL16_HI", Const, 5}, + {"R_PPC64_DTPREL16_HIGH", Const, 10}, + {"R_PPC64_DTPREL16_HIGHA", Const, 10}, + {"R_PPC64_DTPREL16_HIGHER", Const, 5}, + {"R_PPC64_DTPREL16_HIGHERA", Const, 5}, + {"R_PPC64_DTPREL16_HIGHEST", Const, 5}, + {"R_PPC64_DTPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_DTPREL16_LO", Const, 5}, + {"R_PPC64_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_DTPREL34", Const, 20}, + {"R_PPC64_DTPREL64", Const, 5}, + {"R_PPC64_ENTRY", Const, 10}, + 
{"R_PPC64_GLOB_DAT", Const, 20}, + {"R_PPC64_GNU_VTENTRY", Const, 20}, + {"R_PPC64_GNU_VTINHERIT", Const, 20}, + {"R_PPC64_GOT16", Const, 5}, + {"R_PPC64_GOT16_DS", Const, 5}, + {"R_PPC64_GOT16_HA", Const, 5}, + {"R_PPC64_GOT16_HI", Const, 5}, + {"R_PPC64_GOT16_LO", Const, 5}, + {"R_PPC64_GOT16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HA", Const, 5}, + {"R_PPC64_GOT_DTPREL16_HI", Const, 5}, + {"R_PPC64_GOT_DTPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_DTPREL_PCREL34", Const, 20}, + {"R_PPC64_GOT_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSGD16", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSGD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSGD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSGD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TLSLD16", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HA", Const, 5}, + {"R_PPC64_GOT_TLSLD16_HI", Const, 5}, + {"R_PPC64_GOT_TLSLD16_LO", Const, 5}, + {"R_PPC64_GOT_TLSLD_PCREL34", Const, 20}, + {"R_PPC64_GOT_TPREL16_DS", Const, 5}, + {"R_PPC64_GOT_TPREL16_HA", Const, 5}, + {"R_PPC64_GOT_TPREL16_HI", Const, 5}, + {"R_PPC64_GOT_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_GOT_TPREL_PCREL34", Const, 20}, + {"R_PPC64_IRELATIVE", Const, 10}, + {"R_PPC64_JMP_IREL", Const, 10}, + {"R_PPC64_JMP_SLOT", Const, 5}, + {"R_PPC64_NONE", Const, 5}, + {"R_PPC64_PCREL28", Const, 20}, + {"R_PPC64_PCREL34", Const, 20}, + {"R_PPC64_PCREL_OPT", Const, 20}, + {"R_PPC64_PLT16_HA", Const, 20}, + {"R_PPC64_PLT16_HI", Const, 20}, + {"R_PPC64_PLT16_LO", Const, 20}, + {"R_PPC64_PLT16_LO_DS", Const, 10}, + {"R_PPC64_PLT32", Const, 20}, + {"R_PPC64_PLT64", Const, 20}, + {"R_PPC64_PLTCALL", Const, 20}, + {"R_PPC64_PLTCALL_NOTOC", Const, 20}, + {"R_PPC64_PLTGOT16", Const, 10}, + {"R_PPC64_PLTGOT16_DS", Const, 10}, + {"R_PPC64_PLTGOT16_HA", Const, 10}, + {"R_PPC64_PLTGOT16_HI", Const, 10}, + {"R_PPC64_PLTGOT16_LO", Const, 10}, + {"R_PPC64_PLTGOT_LO_DS", Const, 10}, + {"R_PPC64_PLTREL32", Const, 20}, + {"R_PPC64_PLTREL64", Const, 20}, + {"R_PPC64_PLTSEQ", Const, 20}, + {"R_PPC64_PLTSEQ_NOTOC", Const, 20}, + {"R_PPC64_PLT_PCREL34", Const, 20}, + {"R_PPC64_PLT_PCREL34_NOTOC", Const, 20}, + {"R_PPC64_REL14", Const, 5}, + {"R_PPC64_REL14_BRNTAKEN", Const, 5}, + {"R_PPC64_REL14_BRTAKEN", Const, 5}, + {"R_PPC64_REL16", Const, 5}, + {"R_PPC64_REL16DX_HA", Const, 10}, + {"R_PPC64_REL16_HA", Const, 5}, + {"R_PPC64_REL16_HI", Const, 5}, + {"R_PPC64_REL16_HIGH", Const, 20}, + {"R_PPC64_REL16_HIGHA", Const, 20}, + {"R_PPC64_REL16_HIGHER", Const, 20}, + {"R_PPC64_REL16_HIGHER34", Const, 20}, + {"R_PPC64_REL16_HIGHERA", Const, 20}, + {"R_PPC64_REL16_HIGHERA34", Const, 20}, + {"R_PPC64_REL16_HIGHEST", Const, 20}, + {"R_PPC64_REL16_HIGHEST34", Const, 20}, + {"R_PPC64_REL16_HIGHESTA", Const, 20}, + {"R_PPC64_REL16_HIGHESTA34", Const, 20}, + {"R_PPC64_REL16_LO", Const, 5}, + {"R_PPC64_REL24", Const, 5}, + {"R_PPC64_REL24_NOTOC", Const, 10}, + {"R_PPC64_REL24_P9NOTOC", Const, 21}, + {"R_PPC64_REL30", Const, 20}, + {"R_PPC64_REL32", Const, 5}, + {"R_PPC64_REL64", Const, 5}, + {"R_PPC64_RELATIVE", Const, 18}, + {"R_PPC64_SECTOFF", Const, 20}, + {"R_PPC64_SECTOFF_DS", Const, 10}, + {"R_PPC64_SECTOFF_HA", Const, 20}, + {"R_PPC64_SECTOFF_HI", Const, 20}, + {"R_PPC64_SECTOFF_LO", Const, 20}, + {"R_PPC64_SECTOFF_LO_DS", Const, 10}, + {"R_PPC64_TLS", Const, 5}, + {"R_PPC64_TLSGD", Const, 5}, + {"R_PPC64_TLSLD", Const, 5}, + {"R_PPC64_TOC", Const, 5}, + {"R_PPC64_TOC16", Const, 5}, + {"R_PPC64_TOC16_DS", Const, 5}, + {"R_PPC64_TOC16_HA", Const, 5}, + {"R_PPC64_TOC16_HI", Const, 5}, + 
{"R_PPC64_TOC16_LO", Const, 5}, + {"R_PPC64_TOC16_LO_DS", Const, 5}, + {"R_PPC64_TOCSAVE", Const, 10}, + {"R_PPC64_TPREL16", Const, 5}, + {"R_PPC64_TPREL16_DS", Const, 5}, + {"R_PPC64_TPREL16_HA", Const, 5}, + {"R_PPC64_TPREL16_HI", Const, 5}, + {"R_PPC64_TPREL16_HIGH", Const, 10}, + {"R_PPC64_TPREL16_HIGHA", Const, 10}, + {"R_PPC64_TPREL16_HIGHER", Const, 5}, + {"R_PPC64_TPREL16_HIGHERA", Const, 5}, + {"R_PPC64_TPREL16_HIGHEST", Const, 5}, + {"R_PPC64_TPREL16_HIGHESTA", Const, 5}, + {"R_PPC64_TPREL16_LO", Const, 5}, + {"R_PPC64_TPREL16_LO_DS", Const, 5}, + {"R_PPC64_TPREL34", Const, 20}, + {"R_PPC64_TPREL64", Const, 5}, + {"R_PPC64_UADDR16", Const, 20}, + {"R_PPC64_UADDR32", Const, 20}, + {"R_PPC64_UADDR64", Const, 20}, + {"R_PPC_ADDR14", Const, 0}, + {"R_PPC_ADDR14_BRNTAKEN", Const, 0}, + {"R_PPC_ADDR14_BRTAKEN", Const, 0}, + {"R_PPC_ADDR16", Const, 0}, + {"R_PPC_ADDR16_HA", Const, 0}, + {"R_PPC_ADDR16_HI", Const, 0}, + {"R_PPC_ADDR16_LO", Const, 0}, + {"R_PPC_ADDR24", Const, 0}, + {"R_PPC_ADDR32", Const, 0}, + {"R_PPC_COPY", Const, 0}, + {"R_PPC_DTPMOD32", Const, 0}, + {"R_PPC_DTPREL16", Const, 0}, + {"R_PPC_DTPREL16_HA", Const, 0}, + {"R_PPC_DTPREL16_HI", Const, 0}, + {"R_PPC_DTPREL16_LO", Const, 0}, + {"R_PPC_DTPREL32", Const, 0}, + {"R_PPC_EMB_BIT_FLD", Const, 0}, + {"R_PPC_EMB_MRKREF", Const, 0}, + {"R_PPC_EMB_NADDR16", Const, 0}, + {"R_PPC_EMB_NADDR16_HA", Const, 0}, + {"R_PPC_EMB_NADDR16_HI", Const, 0}, + {"R_PPC_EMB_NADDR16_LO", Const, 0}, + {"R_PPC_EMB_NADDR32", Const, 0}, + {"R_PPC_EMB_RELSDA", Const, 0}, + {"R_PPC_EMB_RELSEC16", Const, 0}, + {"R_PPC_EMB_RELST_HA", Const, 0}, + {"R_PPC_EMB_RELST_HI", Const, 0}, + {"R_PPC_EMB_RELST_LO", Const, 0}, + {"R_PPC_EMB_SDA21", Const, 0}, + {"R_PPC_EMB_SDA2I16", Const, 0}, + {"R_PPC_EMB_SDA2REL", Const, 0}, + {"R_PPC_EMB_SDAI16", Const, 0}, + {"R_PPC_GLOB_DAT", Const, 0}, + {"R_PPC_GOT16", Const, 0}, + {"R_PPC_GOT16_HA", Const, 0}, + {"R_PPC_GOT16_HI", Const, 0}, + {"R_PPC_GOT16_LO", Const, 0}, + {"R_PPC_GOT_TLSGD16", Const, 0}, + {"R_PPC_GOT_TLSGD16_HA", Const, 0}, + {"R_PPC_GOT_TLSGD16_HI", Const, 0}, + {"R_PPC_GOT_TLSGD16_LO", Const, 0}, + {"R_PPC_GOT_TLSLD16", Const, 0}, + {"R_PPC_GOT_TLSLD16_HA", Const, 0}, + {"R_PPC_GOT_TLSLD16_HI", Const, 0}, + {"R_PPC_GOT_TLSLD16_LO", Const, 0}, + {"R_PPC_GOT_TPREL16", Const, 0}, + {"R_PPC_GOT_TPREL16_HA", Const, 0}, + {"R_PPC_GOT_TPREL16_HI", Const, 0}, + {"R_PPC_GOT_TPREL16_LO", Const, 0}, + {"R_PPC_JMP_SLOT", Const, 0}, + {"R_PPC_LOCAL24PC", Const, 0}, + {"R_PPC_NONE", Const, 0}, + {"R_PPC_PLT16_HA", Const, 0}, + {"R_PPC_PLT16_HI", Const, 0}, + {"R_PPC_PLT16_LO", Const, 0}, + {"R_PPC_PLT32", Const, 0}, + {"R_PPC_PLTREL24", Const, 0}, + {"R_PPC_PLTREL32", Const, 0}, + {"R_PPC_REL14", Const, 0}, + {"R_PPC_REL14_BRNTAKEN", Const, 0}, + {"R_PPC_REL14_BRTAKEN", Const, 0}, + {"R_PPC_REL24", Const, 0}, + {"R_PPC_REL32", Const, 0}, + {"R_PPC_RELATIVE", Const, 0}, + {"R_PPC_SDAREL16", Const, 0}, + {"R_PPC_SECTOFF", Const, 0}, + {"R_PPC_SECTOFF_HA", Const, 0}, + {"R_PPC_SECTOFF_HI", Const, 0}, + {"R_PPC_SECTOFF_LO", Const, 0}, + {"R_PPC_TLS", Const, 0}, + {"R_PPC_TPREL16", Const, 0}, + {"R_PPC_TPREL16_HA", Const, 0}, + {"R_PPC_TPREL16_HI", Const, 0}, + {"R_PPC_TPREL16_LO", Const, 0}, + {"R_PPC_TPREL32", Const, 0}, + {"R_PPC_UADDR16", Const, 0}, + {"R_PPC_UADDR32", Const, 0}, + {"R_RISCV", Type, 11}, + {"R_RISCV_32", Const, 11}, + {"R_RISCV_32_PCREL", Const, 12}, + {"R_RISCV_64", Const, 11}, + {"R_RISCV_ADD16", Const, 11}, + {"R_RISCV_ADD32", Const, 11}, + {"R_RISCV_ADD64", Const, 11}, + {"R_RISCV_ADD8", 
Const, 11}, + {"R_RISCV_ALIGN", Const, 11}, + {"R_RISCV_BRANCH", Const, 11}, + {"R_RISCV_CALL", Const, 11}, + {"R_RISCV_CALL_PLT", Const, 11}, + {"R_RISCV_COPY", Const, 11}, + {"R_RISCV_GNU_VTENTRY", Const, 11}, + {"R_RISCV_GNU_VTINHERIT", Const, 11}, + {"R_RISCV_GOT_HI20", Const, 11}, + {"R_RISCV_GPREL_I", Const, 11}, + {"R_RISCV_GPREL_S", Const, 11}, + {"R_RISCV_HI20", Const, 11}, + {"R_RISCV_JAL", Const, 11}, + {"R_RISCV_JUMP_SLOT", Const, 11}, + {"R_RISCV_LO12_I", Const, 11}, + {"R_RISCV_LO12_S", Const, 11}, + {"R_RISCV_NONE", Const, 11}, + {"R_RISCV_PCREL_HI20", Const, 11}, + {"R_RISCV_PCREL_LO12_I", Const, 11}, + {"R_RISCV_PCREL_LO12_S", Const, 11}, + {"R_RISCV_RELATIVE", Const, 11}, + {"R_RISCV_RELAX", Const, 11}, + {"R_RISCV_RVC_BRANCH", Const, 11}, + {"R_RISCV_RVC_JUMP", Const, 11}, + {"R_RISCV_RVC_LUI", Const, 11}, + {"R_RISCV_SET16", Const, 11}, + {"R_RISCV_SET32", Const, 11}, + {"R_RISCV_SET6", Const, 11}, + {"R_RISCV_SET8", Const, 11}, + {"R_RISCV_SUB16", Const, 11}, + {"R_RISCV_SUB32", Const, 11}, + {"R_RISCV_SUB6", Const, 11}, + {"R_RISCV_SUB64", Const, 11}, + {"R_RISCV_SUB8", Const, 11}, + {"R_RISCV_TLS_DTPMOD32", Const, 11}, + {"R_RISCV_TLS_DTPMOD64", Const, 11}, + {"R_RISCV_TLS_DTPREL32", Const, 11}, + {"R_RISCV_TLS_DTPREL64", Const, 11}, + {"R_RISCV_TLS_GD_HI20", Const, 11}, + {"R_RISCV_TLS_GOT_HI20", Const, 11}, + {"R_RISCV_TLS_TPREL32", Const, 11}, + {"R_RISCV_TLS_TPREL64", Const, 11}, + {"R_RISCV_TPREL_ADD", Const, 11}, + {"R_RISCV_TPREL_HI20", Const, 11}, + {"R_RISCV_TPREL_I", Const, 11}, + {"R_RISCV_TPREL_LO12_I", Const, 11}, + {"R_RISCV_TPREL_LO12_S", Const, 11}, + {"R_RISCV_TPREL_S", Const, 11}, + {"R_SPARC", Type, 0}, + {"R_SPARC_10", Const, 0}, + {"R_SPARC_11", Const, 0}, + {"R_SPARC_13", Const, 0}, + {"R_SPARC_16", Const, 0}, + {"R_SPARC_22", Const, 0}, + {"R_SPARC_32", Const, 0}, + {"R_SPARC_5", Const, 0}, + {"R_SPARC_6", Const, 0}, + {"R_SPARC_64", Const, 0}, + {"R_SPARC_7", Const, 0}, + {"R_SPARC_8", Const, 0}, + {"R_SPARC_COPY", Const, 0}, + {"R_SPARC_DISP16", Const, 0}, + {"R_SPARC_DISP32", Const, 0}, + {"R_SPARC_DISP64", Const, 0}, + {"R_SPARC_DISP8", Const, 0}, + {"R_SPARC_GLOB_DAT", Const, 0}, + {"R_SPARC_GLOB_JMP", Const, 0}, + {"R_SPARC_GOT10", Const, 0}, + {"R_SPARC_GOT13", Const, 0}, + {"R_SPARC_GOT22", Const, 0}, + {"R_SPARC_H44", Const, 0}, + {"R_SPARC_HH22", Const, 0}, + {"R_SPARC_HI22", Const, 0}, + {"R_SPARC_HIPLT22", Const, 0}, + {"R_SPARC_HIX22", Const, 0}, + {"R_SPARC_HM10", Const, 0}, + {"R_SPARC_JMP_SLOT", Const, 0}, + {"R_SPARC_L44", Const, 0}, + {"R_SPARC_LM22", Const, 0}, + {"R_SPARC_LO10", Const, 0}, + {"R_SPARC_LOPLT10", Const, 0}, + {"R_SPARC_LOX10", Const, 0}, + {"R_SPARC_M44", Const, 0}, + {"R_SPARC_NONE", Const, 0}, + {"R_SPARC_OLO10", Const, 0}, + {"R_SPARC_PC10", Const, 0}, + {"R_SPARC_PC22", Const, 0}, + {"R_SPARC_PCPLT10", Const, 0}, + {"R_SPARC_PCPLT22", Const, 0}, + {"R_SPARC_PCPLT32", Const, 0}, + {"R_SPARC_PC_HH22", Const, 0}, + {"R_SPARC_PC_HM10", Const, 0}, + {"R_SPARC_PC_LM22", Const, 0}, + {"R_SPARC_PLT32", Const, 0}, + {"R_SPARC_PLT64", Const, 0}, + {"R_SPARC_REGISTER", Const, 0}, + {"R_SPARC_RELATIVE", Const, 0}, + {"R_SPARC_UA16", Const, 0}, + {"R_SPARC_UA32", Const, 0}, + {"R_SPARC_UA64", Const, 0}, + {"R_SPARC_WDISP16", Const, 0}, + {"R_SPARC_WDISP19", Const, 0}, + {"R_SPARC_WDISP22", Const, 0}, + {"R_SPARC_WDISP30", Const, 0}, + {"R_SPARC_WPLT30", Const, 0}, + {"R_SYM32", Func, 0}, + {"R_SYM64", Func, 0}, + {"R_TYPE32", Func, 0}, + {"R_TYPE64", Func, 0}, + {"R_X86_64", Type, 0}, + {"R_X86_64_16", Const, 0}, + 
{"R_X86_64_32", Const, 0}, + {"R_X86_64_32S", Const, 0}, + {"R_X86_64_64", Const, 0}, + {"R_X86_64_8", Const, 0}, + {"R_X86_64_COPY", Const, 0}, + {"R_X86_64_DTPMOD64", Const, 0}, + {"R_X86_64_DTPOFF32", Const, 0}, + {"R_X86_64_DTPOFF64", Const, 0}, + {"R_X86_64_GLOB_DAT", Const, 0}, + {"R_X86_64_GOT32", Const, 0}, + {"R_X86_64_GOT64", Const, 10}, + {"R_X86_64_GOTOFF64", Const, 10}, + {"R_X86_64_GOTPC32", Const, 10}, + {"R_X86_64_GOTPC32_TLSDESC", Const, 10}, + {"R_X86_64_GOTPC64", Const, 10}, + {"R_X86_64_GOTPCREL", Const, 0}, + {"R_X86_64_GOTPCREL64", Const, 10}, + {"R_X86_64_GOTPCRELX", Const, 10}, + {"R_X86_64_GOTPLT64", Const, 10}, + {"R_X86_64_GOTTPOFF", Const, 0}, + {"R_X86_64_IRELATIVE", Const, 10}, + {"R_X86_64_JMP_SLOT", Const, 0}, + {"R_X86_64_NONE", Const, 0}, + {"R_X86_64_PC16", Const, 0}, + {"R_X86_64_PC32", Const, 0}, + {"R_X86_64_PC32_BND", Const, 10}, + {"R_X86_64_PC64", Const, 10}, + {"R_X86_64_PC8", Const, 0}, + {"R_X86_64_PLT32", Const, 0}, + {"R_X86_64_PLT32_BND", Const, 10}, + {"R_X86_64_PLTOFF64", Const, 10}, + {"R_X86_64_RELATIVE", Const, 0}, + {"R_X86_64_RELATIVE64", Const, 10}, + {"R_X86_64_REX_GOTPCRELX", Const, 10}, + {"R_X86_64_SIZE32", Const, 10}, + {"R_X86_64_SIZE64", Const, 10}, + {"R_X86_64_TLSDESC", Const, 10}, + {"R_X86_64_TLSDESC_CALL", Const, 10}, + {"R_X86_64_TLSGD", Const, 0}, + {"R_X86_64_TLSLD", Const, 0}, + {"R_X86_64_TPOFF32", Const, 0}, + {"R_X86_64_TPOFF64", Const, 0}, + {"Rel32", Type, 0}, + {"Rel32.Info", Field, 0}, + {"Rel32.Off", Field, 0}, + {"Rel64", Type, 0}, + {"Rel64.Info", Field, 0}, + {"Rel64.Off", Field, 0}, + {"Rela32", Type, 0}, + {"Rela32.Addend", Field, 0}, + {"Rela32.Info", Field, 0}, + {"Rela32.Off", Field, 0}, + {"Rela64", Type, 0}, + {"Rela64.Addend", Field, 0}, + {"Rela64.Info", Field, 0}, + {"Rela64.Off", Field, 0}, + {"SHF_ALLOC", Const, 0}, + {"SHF_COMPRESSED", Const, 6}, + {"SHF_EXECINSTR", Const, 0}, + {"SHF_GROUP", Const, 0}, + {"SHF_INFO_LINK", Const, 0}, + {"SHF_LINK_ORDER", Const, 0}, + {"SHF_MASKOS", Const, 0}, + {"SHF_MASKPROC", Const, 0}, + {"SHF_MERGE", Const, 0}, + {"SHF_OS_NONCONFORMING", Const, 0}, + {"SHF_STRINGS", Const, 0}, + {"SHF_TLS", Const, 0}, + {"SHF_WRITE", Const, 0}, + {"SHN_ABS", Const, 0}, + {"SHN_COMMON", Const, 0}, + {"SHN_HIOS", Const, 0}, + {"SHN_HIPROC", Const, 0}, + {"SHN_HIRESERVE", Const, 0}, + {"SHN_LOOS", Const, 0}, + {"SHN_LOPROC", Const, 0}, + {"SHN_LORESERVE", Const, 0}, + {"SHN_UNDEF", Const, 0}, + {"SHN_XINDEX", Const, 0}, + {"SHT_DYNAMIC", Const, 0}, + {"SHT_DYNSYM", Const, 0}, + {"SHT_FINI_ARRAY", Const, 0}, + {"SHT_GNU_ATTRIBUTES", Const, 0}, + {"SHT_GNU_HASH", Const, 0}, + {"SHT_GNU_LIBLIST", Const, 0}, + {"SHT_GNU_VERDEF", Const, 0}, + {"SHT_GNU_VERNEED", Const, 0}, + {"SHT_GNU_VERSYM", Const, 0}, + {"SHT_GROUP", Const, 0}, + {"SHT_HASH", Const, 0}, + {"SHT_HIOS", Const, 0}, + {"SHT_HIPROC", Const, 0}, + {"SHT_HIUSER", Const, 0}, + {"SHT_INIT_ARRAY", Const, 0}, + {"SHT_LOOS", Const, 0}, + {"SHT_LOPROC", Const, 0}, + {"SHT_LOUSER", Const, 0}, + {"SHT_MIPS_ABIFLAGS", Const, 17}, + {"SHT_NOBITS", Const, 0}, + {"SHT_NOTE", Const, 0}, + {"SHT_NULL", Const, 0}, + {"SHT_PREINIT_ARRAY", Const, 0}, + {"SHT_PROGBITS", Const, 0}, + {"SHT_REL", Const, 0}, + {"SHT_RELA", Const, 0}, + {"SHT_SHLIB", Const, 0}, + {"SHT_STRTAB", Const, 0}, + {"SHT_SYMTAB", Const, 0}, + {"SHT_SYMTAB_SHNDX", Const, 0}, + {"STB_GLOBAL", Const, 0}, + {"STB_HIOS", Const, 0}, + {"STB_HIPROC", Const, 0}, + {"STB_LOCAL", Const, 0}, + {"STB_LOOS", Const, 0}, + {"STB_LOPROC", Const, 0}, + {"STB_WEAK", Const, 0}, + 
{"STT_COMMON", Const, 0}, + {"STT_FILE", Const, 0}, + {"STT_FUNC", Const, 0}, + {"STT_HIOS", Const, 0}, + {"STT_HIPROC", Const, 0}, + {"STT_LOOS", Const, 0}, + {"STT_LOPROC", Const, 0}, + {"STT_NOTYPE", Const, 0}, + {"STT_OBJECT", Const, 0}, + {"STT_SECTION", Const, 0}, + {"STT_TLS", Const, 0}, + {"STV_DEFAULT", Const, 0}, + {"STV_HIDDEN", Const, 0}, + {"STV_INTERNAL", Const, 0}, + {"STV_PROTECTED", Const, 0}, + {"ST_BIND", Func, 0}, + {"ST_INFO", Func, 0}, + {"ST_TYPE", Func, 0}, + {"ST_VISIBILITY", Func, 0}, + {"Section", Type, 0}, + {"Section.ReaderAt", Field, 0}, + {"Section.SectionHeader", Field, 0}, + {"Section32", Type, 0}, + {"Section32.Addr", Field, 0}, + {"Section32.Addralign", Field, 0}, + {"Section32.Entsize", Field, 0}, + {"Section32.Flags", Field, 0}, + {"Section32.Info", Field, 0}, + {"Section32.Link", Field, 0}, + {"Section32.Name", Field, 0}, + {"Section32.Off", Field, 0}, + {"Section32.Size", Field, 0}, + {"Section32.Type", Field, 0}, + {"Section64", Type, 0}, + {"Section64.Addr", Field, 0}, + {"Section64.Addralign", Field, 0}, + {"Section64.Entsize", Field, 0}, + {"Section64.Flags", Field, 0}, + {"Section64.Info", Field, 0}, + {"Section64.Link", Field, 0}, + {"Section64.Name", Field, 0}, + {"Section64.Off", Field, 0}, + {"Section64.Size", Field, 0}, + {"Section64.Type", Field, 0}, + {"SectionFlag", Type, 0}, + {"SectionHeader", Type, 0}, + {"SectionHeader.Addr", Field, 0}, + {"SectionHeader.Addralign", Field, 0}, + {"SectionHeader.Entsize", Field, 0}, + {"SectionHeader.FileSize", Field, 6}, + {"SectionHeader.Flags", Field, 0}, + {"SectionHeader.Info", Field, 0}, + {"SectionHeader.Link", Field, 0}, + {"SectionHeader.Name", Field, 0}, + {"SectionHeader.Offset", Field, 0}, + {"SectionHeader.Size", Field, 0}, + {"SectionHeader.Type", Field, 0}, + {"SectionIndex", Type, 0}, + {"SectionType", Type, 0}, + {"Sym32", Type, 0}, + {"Sym32.Info", Field, 0}, + {"Sym32.Name", Field, 0}, + {"Sym32.Other", Field, 0}, + {"Sym32.Shndx", Field, 0}, + {"Sym32.Size", Field, 0}, + {"Sym32.Value", Field, 0}, + {"Sym32Size", Const, 0}, + {"Sym64", Type, 0}, + {"Sym64.Info", Field, 0}, + {"Sym64.Name", Field, 0}, + {"Sym64.Other", Field, 0}, + {"Sym64.Shndx", Field, 0}, + {"Sym64.Size", Field, 0}, + {"Sym64.Value", Field, 0}, + {"Sym64Size", Const, 0}, + {"SymBind", Type, 0}, + {"SymType", Type, 0}, + {"SymVis", Type, 0}, + {"Symbol", Type, 0}, + {"Symbol.Info", Field, 0}, + {"Symbol.Library", Field, 13}, + {"Symbol.Name", Field, 0}, + {"Symbol.Other", Field, 0}, + {"Symbol.Section", Field, 0}, + {"Symbol.Size", Field, 0}, + {"Symbol.Value", Field, 0}, + {"Symbol.Version", Field, 13}, + {"Type", Type, 0}, + {"Version", Type, 0}, + }, + "debug/gosym": { + {"(*DecodingError).Error", Method, 0}, + {"(*LineTable).LineToPC", Method, 0}, + {"(*LineTable).PCToLine", Method, 0}, + {"(*Sym).BaseName", Method, 0}, + {"(*Sym).PackageName", Method, 0}, + {"(*Sym).ReceiverName", Method, 0}, + {"(*Sym).Static", Method, 0}, + {"(*Table).LineToPC", Method, 0}, + {"(*Table).LookupFunc", Method, 0}, + {"(*Table).LookupSym", Method, 0}, + {"(*Table).PCToFunc", Method, 0}, + {"(*Table).PCToLine", Method, 0}, + {"(*Table).SymByAddr", Method, 0}, + {"(*UnknownLineError).Error", Method, 0}, + {"(Func).BaseName", Method, 0}, + {"(Func).PackageName", Method, 0}, + {"(Func).ReceiverName", Method, 0}, + {"(Func).Static", Method, 0}, + {"(UnknownFileError).Error", Method, 0}, + {"DecodingError", Type, 0}, + {"Func", Type, 0}, + {"Func.End", Field, 0}, + {"Func.Entry", Field, 0}, + {"Func.FrameSize", Field, 0}, + 
{"Func.LineTable", Field, 0}, + {"Func.Locals", Field, 0}, + {"Func.Obj", Field, 0}, + {"Func.Params", Field, 0}, + {"Func.Sym", Field, 0}, + {"LineTable", Type, 0}, + {"LineTable.Data", Field, 0}, + {"LineTable.Line", Field, 0}, + {"LineTable.PC", Field, 0}, + {"NewLineTable", Func, 0}, + {"NewTable", Func, 0}, + {"Obj", Type, 0}, + {"Obj.Funcs", Field, 0}, + {"Obj.Paths", Field, 0}, + {"Sym", Type, 0}, + {"Sym.Func", Field, 0}, + {"Sym.GoType", Field, 0}, + {"Sym.Name", Field, 0}, + {"Sym.Type", Field, 0}, + {"Sym.Value", Field, 0}, + {"Table", Type, 0}, + {"Table.Files", Field, 0}, + {"Table.Funcs", Field, 0}, + {"Table.Objs", Field, 0}, + {"Table.Syms", Field, 0}, + {"UnknownFileError", Type, 0}, + {"UnknownLineError", Type, 0}, + {"UnknownLineError.File", Field, 0}, + {"UnknownLineError.Line", Field, 0}, + }, + "debug/macho": { + {"(*FatFile).Close", Method, 3}, + {"(*File).Close", Method, 0}, + {"(*File).DWARF", Method, 0}, + {"(*File).ImportedLibraries", Method, 0}, + {"(*File).ImportedSymbols", Method, 0}, + {"(*File).Section", Method, 0}, + {"(*File).Segment", Method, 0}, + {"(*FormatError).Error", Method, 0}, + {"(*Section).Data", Method, 0}, + {"(*Section).Open", Method, 0}, + {"(*Segment).Data", Method, 0}, + {"(*Segment).Open", Method, 0}, + {"(Cpu).GoString", Method, 0}, + {"(Cpu).String", Method, 0}, + {"(Dylib).Raw", Method, 0}, + {"(Dysymtab).Raw", Method, 0}, + {"(FatArch).Close", Method, 3}, + {"(FatArch).DWARF", Method, 3}, + {"(FatArch).ImportedLibraries", Method, 3}, + {"(FatArch).ImportedSymbols", Method, 3}, + {"(FatArch).Section", Method, 3}, + {"(FatArch).Segment", Method, 3}, + {"(LoadBytes).Raw", Method, 0}, + {"(LoadCmd).GoString", Method, 0}, + {"(LoadCmd).String", Method, 0}, + {"(RelocTypeARM).GoString", Method, 10}, + {"(RelocTypeARM).String", Method, 10}, + {"(RelocTypeARM64).GoString", Method, 10}, + {"(RelocTypeARM64).String", Method, 10}, + {"(RelocTypeGeneric).GoString", Method, 10}, + {"(RelocTypeGeneric).String", Method, 10}, + {"(RelocTypeX86_64).GoString", Method, 10}, + {"(RelocTypeX86_64).String", Method, 10}, + {"(Rpath).Raw", Method, 10}, + {"(Section).ReadAt", Method, 0}, + {"(Segment).Raw", Method, 0}, + {"(Segment).ReadAt", Method, 0}, + {"(Symtab).Raw", Method, 0}, + {"(Type).GoString", Method, 10}, + {"(Type).String", Method, 10}, + {"ARM64_RELOC_ADDEND", Const, 10}, + {"ARM64_RELOC_BRANCH26", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_GOT_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_PAGE21", Const, 10}, + {"ARM64_RELOC_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_POINTER_TO_GOT", Const, 10}, + {"ARM64_RELOC_SUBTRACTOR", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGE21", Const, 10}, + {"ARM64_RELOC_TLVP_LOAD_PAGEOFF12", Const, 10}, + {"ARM64_RELOC_UNSIGNED", Const, 10}, + {"ARM_RELOC_BR24", Const, 10}, + {"ARM_RELOC_HALF", Const, 10}, + {"ARM_RELOC_HALF_SECTDIFF", Const, 10}, + {"ARM_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"ARM_RELOC_PAIR", Const, 10}, + {"ARM_RELOC_PB_LA_PTR", Const, 10}, + {"ARM_RELOC_SECTDIFF", Const, 10}, + {"ARM_RELOC_VANILLA", Const, 10}, + {"ARM_THUMB_32BIT_BRANCH", Const, 10}, + {"ARM_THUMB_RELOC_BR22", Const, 10}, + {"Cpu", Type, 0}, + {"Cpu386", Const, 0}, + {"CpuAmd64", Const, 0}, + {"CpuArm", Const, 3}, + {"CpuArm64", Const, 11}, + {"CpuPpc", Const, 3}, + {"CpuPpc64", Const, 3}, + {"Dylib", Type, 0}, + {"Dylib.CompatVersion", Field, 0}, + {"Dylib.CurrentVersion", Field, 0}, + {"Dylib.LoadBytes", Field, 0}, + {"Dylib.Name", Field, 0}, + {"Dylib.Time", Field, 0}, + {"DylibCmd", Type, 0}, + 
{"DylibCmd.Cmd", Field, 0}, + {"DylibCmd.CompatVersion", Field, 0}, + {"DylibCmd.CurrentVersion", Field, 0}, + {"DylibCmd.Len", Field, 0}, + {"DylibCmd.Name", Field, 0}, + {"DylibCmd.Time", Field, 0}, + {"Dysymtab", Type, 0}, + {"Dysymtab.DysymtabCmd", Field, 0}, + {"Dysymtab.IndirectSyms", Field, 0}, + {"Dysymtab.LoadBytes", Field, 0}, + {"DysymtabCmd", Type, 0}, + {"DysymtabCmd.Cmd", Field, 0}, + {"DysymtabCmd.Extrefsymoff", Field, 0}, + {"DysymtabCmd.Extreloff", Field, 0}, + {"DysymtabCmd.Iextdefsym", Field, 0}, + {"DysymtabCmd.Ilocalsym", Field, 0}, + {"DysymtabCmd.Indirectsymoff", Field, 0}, + {"DysymtabCmd.Iundefsym", Field, 0}, + {"DysymtabCmd.Len", Field, 0}, + {"DysymtabCmd.Locreloff", Field, 0}, + {"DysymtabCmd.Modtaboff", Field, 0}, + {"DysymtabCmd.Nextdefsym", Field, 0}, + {"DysymtabCmd.Nextrefsyms", Field, 0}, + {"DysymtabCmd.Nextrel", Field, 0}, + {"DysymtabCmd.Nindirectsyms", Field, 0}, + {"DysymtabCmd.Nlocalsym", Field, 0}, + {"DysymtabCmd.Nlocrel", Field, 0}, + {"DysymtabCmd.Nmodtab", Field, 0}, + {"DysymtabCmd.Ntoc", Field, 0}, + {"DysymtabCmd.Nundefsym", Field, 0}, + {"DysymtabCmd.Tocoffset", Field, 0}, + {"ErrNotFat", Var, 3}, + {"FatArch", Type, 3}, + {"FatArch.FatArchHeader", Field, 3}, + {"FatArch.File", Field, 3}, + {"FatArchHeader", Type, 3}, + {"FatArchHeader.Align", Field, 3}, + {"FatArchHeader.Cpu", Field, 3}, + {"FatArchHeader.Offset", Field, 3}, + {"FatArchHeader.Size", Field, 3}, + {"FatArchHeader.SubCpu", Field, 3}, + {"FatFile", Type, 3}, + {"FatFile.Arches", Field, 3}, + {"FatFile.Magic", Field, 3}, + {"File", Type, 0}, + {"File.ByteOrder", Field, 0}, + {"File.Dysymtab", Field, 0}, + {"File.FileHeader", Field, 0}, + {"File.Loads", Field, 0}, + {"File.Sections", Field, 0}, + {"File.Symtab", Field, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Cmdsz", Field, 0}, + {"FileHeader.Cpu", Field, 0}, + {"FileHeader.Flags", Field, 0}, + {"FileHeader.Magic", Field, 0}, + {"FileHeader.Ncmd", Field, 0}, + {"FileHeader.SubCpu", Field, 0}, + {"FileHeader.Type", Field, 0}, + {"FlagAllModsBound", Const, 10}, + {"FlagAllowStackExecution", Const, 10}, + {"FlagAppExtensionSafe", Const, 10}, + {"FlagBindAtLoad", Const, 10}, + {"FlagBindsToWeak", Const, 10}, + {"FlagCanonical", Const, 10}, + {"FlagDeadStrippableDylib", Const, 10}, + {"FlagDyldLink", Const, 10}, + {"FlagForceFlat", Const, 10}, + {"FlagHasTLVDescriptors", Const, 10}, + {"FlagIncrLink", Const, 10}, + {"FlagLazyInit", Const, 10}, + {"FlagNoFixPrebinding", Const, 10}, + {"FlagNoHeapExecution", Const, 10}, + {"FlagNoMultiDefs", Const, 10}, + {"FlagNoReexportedDylibs", Const, 10}, + {"FlagNoUndefs", Const, 10}, + {"FlagPIE", Const, 10}, + {"FlagPrebindable", Const, 10}, + {"FlagPrebound", Const, 10}, + {"FlagRootSafe", Const, 10}, + {"FlagSetuidSafe", Const, 10}, + {"FlagSplitSegs", Const, 10}, + {"FlagSubsectionsViaSymbols", Const, 10}, + {"FlagTwoLevel", Const, 10}, + {"FlagWeakDefines", Const, 10}, + {"FormatError", Type, 0}, + {"GENERIC_RELOC_LOCAL_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_PAIR", Const, 10}, + {"GENERIC_RELOC_PB_LA_PTR", Const, 10}, + {"GENERIC_RELOC_SECTDIFF", Const, 10}, + {"GENERIC_RELOC_TLV", Const, 10}, + {"GENERIC_RELOC_VANILLA", Const, 10}, + {"Load", Type, 0}, + {"LoadBytes", Type, 0}, + {"LoadCmd", Type, 0}, + {"LoadCmdDylib", Const, 0}, + {"LoadCmdDylinker", Const, 0}, + {"LoadCmdDysymtab", Const, 0}, + {"LoadCmdRpath", Const, 10}, + {"LoadCmdSegment", Const, 0}, + {"LoadCmdSegment64", Const, 0}, + {"LoadCmdSymtab", Const, 0}, + {"LoadCmdThread", Const, 0}, + {"LoadCmdUnixThread", Const, 
[Remainder of hunk elided: machine-generated Go standard-library API manifest data whose diff line structure was flattened in extraction. Each added line is a {"SymbolName", Kind, Minor} entry, where Kind is one of Func, Type, Method, Field, Const, or Var, and Minor is the Go 1.N release that introduced the symbol (0 meaning present since Go 1.0). The entries continue the "debug/macho" table and then cover, in order: "debug/pe", "debug/plan9obj", "embed", "encoding", "encoding/ascii85", "encoding/asn1", "encoding/base32", "encoding/base64", "encoding/binary", "encoding/csv", "encoding/gob", "encoding/hex", "encoding/json", "encoding/pem", "encoding/xml", "errors", "expvar", "flag", "fmt", "go/ast", "go/build", "go/build/constraint", "go/constant", "go/doc", "go/doc/comment", "go/format", "go/importer", "go/parser", "go/printer", "go/scanner", "go/token", and "go/types" (the "go/types" table continues past this excerpt).]
Type, 18}, + {"Tuple", Type, 5}, + {"Typ", Var, 5}, + {"Type", Type, 5}, + {"TypeAndValue", Type, 5}, + {"TypeAndValue.Type", Field, 5}, + {"TypeAndValue.Value", Field, 5}, + {"TypeList", Type, 18}, + {"TypeName", Type, 5}, + {"TypeParam", Type, 18}, + {"TypeParamList", Type, 18}, + {"TypeString", Func, 5}, + {"Uint", Const, 5}, + {"Uint16", Const, 5}, + {"Uint32", Const, 5}, + {"Uint64", Const, 5}, + {"Uint8", Const, 5}, + {"Uintptr", Const, 5}, + {"Unalias", Func, 22}, + {"Union", Type, 18}, + {"Universe", Var, 5}, + {"Unsafe", Var, 5}, + {"UnsafePointer", Const, 5}, + {"UntypedBool", Const, 5}, + {"UntypedComplex", Const, 5}, + {"UntypedFloat", Const, 5}, + {"UntypedInt", Const, 5}, + {"UntypedNil", Const, 5}, + {"UntypedRune", Const, 5}, + {"UntypedString", Const, 5}, + {"Var", Type, 5}, + {"WriteExpr", Func, 5}, + {"WriteSignature", Func, 5}, + {"WriteType", Func, 5}, + }, + "go/version": { + {"Compare", Func, 22}, + {"IsValid", Func, 22}, + {"Lang", Func, 22}, + }, + "hash": { + {"Hash", Type, 0}, + {"Hash32", Type, 0}, + {"Hash64", Type, 0}, + }, + "hash/adler32": { + {"Checksum", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + }, + "hash/crc32": { + {"Castagnoli", Const, 0}, + {"Checksum", Func, 0}, + {"ChecksumIEEE", Func, 0}, + {"IEEE", Const, 0}, + {"IEEETable", Var, 0}, + {"Koopman", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"NewIEEE", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/crc64": { + {"Checksum", Func, 0}, + {"ECMA", Const, 0}, + {"ISO", Const, 0}, + {"MakeTable", Func, 0}, + {"New", Func, 0}, + {"Size", Const, 0}, + {"Table", Type, 0}, + {"Update", Func, 0}, + }, + "hash/fnv": { + {"New128", Func, 9}, + {"New128a", Func, 9}, + {"New32", Func, 0}, + {"New32a", Func, 0}, + {"New64", Func, 0}, + {"New64a", Func, 0}, + }, + "hash/maphash": { + {"(*Hash).BlockSize", Method, 14}, + {"(*Hash).Reset", Method, 14}, + {"(*Hash).Seed", Method, 14}, + {"(*Hash).SetSeed", Method, 14}, + {"(*Hash).Size", Method, 14}, + {"(*Hash).Sum", Method, 14}, + {"(*Hash).Sum64", Method, 14}, + {"(*Hash).Write", Method, 14}, + {"(*Hash).WriteByte", Method, 14}, + {"(*Hash).WriteString", Method, 14}, + {"Bytes", Func, 19}, + {"Hash", Type, 14}, + {"MakeSeed", Func, 14}, + {"Seed", Type, 14}, + {"String", Func, 19}, + }, + "html": { + {"EscapeString", Func, 0}, + {"UnescapeString", Func, 0}, + }, + "html/template": { + {"(*Error).Error", Method, 0}, + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 6}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"CSS", Type, 0}, + {"ErrAmbigContext", Const, 0}, + {"ErrBadHTML", Const, 0}, + {"ErrBranchEnd", Const, 0}, + {"ErrEndContext", Const, 0}, + {"ErrJSTemplate", Const, 21}, + {"ErrNoSuchTemplate", Const, 0}, + {"ErrOutputContext", Const, 0}, + {"ErrPartialCharset", Const, 0}, + {"ErrPartialEscape", Const, 0}, + {"ErrPredefinedEscaper", Const, 9}, + {"ErrRangeLoopReentry", Const, 0}, + {"ErrSlashAmbig", Const, 0}, + {"Error", Type, 0}, + {"Error.Description", Field, 0}, + 
{"Error.ErrorCode", Field, 0}, + {"Error.Line", Field, 0}, + {"Error.Name", Field, 0}, + {"Error.Node", Field, 4}, + {"ErrorCode", Type, 0}, + {"FuncMap", Type, 0}, + {"HTML", Type, 0}, + {"HTMLAttr", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JS", Type, 0}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"JSStr", Type, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"OK", Const, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Srcset", Type, 10}, + {"Template", Type, 0}, + {"Template.Tree", Field, 2}, + {"URL", Type, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "image": { + {"(*Alpha).AlphaAt", Method, 4}, + {"(*Alpha).At", Method, 0}, + {"(*Alpha).Bounds", Method, 0}, + {"(*Alpha).ColorModel", Method, 0}, + {"(*Alpha).Opaque", Method, 0}, + {"(*Alpha).PixOffset", Method, 0}, + {"(*Alpha).RGBA64At", Method, 17}, + {"(*Alpha).Set", Method, 0}, + {"(*Alpha).SetAlpha", Method, 0}, + {"(*Alpha).SetRGBA64", Method, 17}, + {"(*Alpha).SubImage", Method, 0}, + {"(*Alpha16).Alpha16At", Method, 4}, + {"(*Alpha16).At", Method, 0}, + {"(*Alpha16).Bounds", Method, 0}, + {"(*Alpha16).ColorModel", Method, 0}, + {"(*Alpha16).Opaque", Method, 0}, + {"(*Alpha16).PixOffset", Method, 0}, + {"(*Alpha16).RGBA64At", Method, 17}, + {"(*Alpha16).Set", Method, 0}, + {"(*Alpha16).SetAlpha16", Method, 0}, + {"(*Alpha16).SetRGBA64", Method, 17}, + {"(*Alpha16).SubImage", Method, 0}, + {"(*CMYK).At", Method, 5}, + {"(*CMYK).Bounds", Method, 5}, + {"(*CMYK).CMYKAt", Method, 5}, + {"(*CMYK).ColorModel", Method, 5}, + {"(*CMYK).Opaque", Method, 5}, + {"(*CMYK).PixOffset", Method, 5}, + {"(*CMYK).RGBA64At", Method, 17}, + {"(*CMYK).Set", Method, 5}, + {"(*CMYK).SetCMYK", Method, 5}, + {"(*CMYK).SetRGBA64", Method, 17}, + {"(*CMYK).SubImage", Method, 5}, + {"(*Gray).At", Method, 0}, + {"(*Gray).Bounds", Method, 0}, + {"(*Gray).ColorModel", Method, 0}, + {"(*Gray).GrayAt", Method, 4}, + {"(*Gray).Opaque", Method, 0}, + {"(*Gray).PixOffset", Method, 0}, + {"(*Gray).RGBA64At", Method, 17}, + {"(*Gray).Set", Method, 0}, + {"(*Gray).SetGray", Method, 0}, + {"(*Gray).SetRGBA64", Method, 17}, + {"(*Gray).SubImage", Method, 0}, + {"(*Gray16).At", Method, 0}, + {"(*Gray16).Bounds", Method, 0}, + {"(*Gray16).ColorModel", Method, 0}, + {"(*Gray16).Gray16At", Method, 4}, + {"(*Gray16).Opaque", Method, 0}, + {"(*Gray16).PixOffset", Method, 0}, + {"(*Gray16).RGBA64At", Method, 17}, + {"(*Gray16).Set", Method, 0}, + {"(*Gray16).SetGray16", Method, 0}, + {"(*Gray16).SetRGBA64", Method, 17}, + {"(*Gray16).SubImage", Method, 0}, + {"(*NRGBA).At", Method, 0}, + {"(*NRGBA).Bounds", Method, 0}, + {"(*NRGBA).ColorModel", Method, 0}, + {"(*NRGBA).NRGBAAt", Method, 4}, + {"(*NRGBA).Opaque", Method, 0}, + {"(*NRGBA).PixOffset", Method, 0}, + {"(*NRGBA).RGBA64At", Method, 17}, + {"(*NRGBA).Set", Method, 0}, + {"(*NRGBA).SetNRGBA", Method, 0}, + {"(*NRGBA).SetRGBA64", Method, 17}, + {"(*NRGBA).SubImage", Method, 0}, + {"(*NRGBA64).At", Method, 0}, + {"(*NRGBA64).Bounds", Method, 0}, + {"(*NRGBA64).ColorModel", Method, 0}, + {"(*NRGBA64).NRGBA64At", Method, 4}, + {"(*NRGBA64).Opaque", Method, 0}, + {"(*NRGBA64).PixOffset", Method, 0}, + {"(*NRGBA64).RGBA64At", Method, 17}, + {"(*NRGBA64).Set", Method, 0}, + {"(*NRGBA64).SetNRGBA64", Method, 0}, + {"(*NRGBA64).SetRGBA64", Method, 17}, + {"(*NRGBA64).SubImage", Method, 0}, + {"(*NYCbCrA).AOffset", Method, 6}, + {"(*NYCbCrA).At", Method, 6}, + 
{"(*NYCbCrA).Bounds", Method, 6}, + {"(*NYCbCrA).COffset", Method, 6}, + {"(*NYCbCrA).ColorModel", Method, 6}, + {"(*NYCbCrA).NYCbCrAAt", Method, 6}, + {"(*NYCbCrA).Opaque", Method, 6}, + {"(*NYCbCrA).RGBA64At", Method, 17}, + {"(*NYCbCrA).SubImage", Method, 6}, + {"(*NYCbCrA).YCbCrAt", Method, 6}, + {"(*NYCbCrA).YOffset", Method, 6}, + {"(*Paletted).At", Method, 0}, + {"(*Paletted).Bounds", Method, 0}, + {"(*Paletted).ColorIndexAt", Method, 0}, + {"(*Paletted).ColorModel", Method, 0}, + {"(*Paletted).Opaque", Method, 0}, + {"(*Paletted).PixOffset", Method, 0}, + {"(*Paletted).RGBA64At", Method, 17}, + {"(*Paletted).Set", Method, 0}, + {"(*Paletted).SetColorIndex", Method, 0}, + {"(*Paletted).SetRGBA64", Method, 17}, + {"(*Paletted).SubImage", Method, 0}, + {"(*RGBA).At", Method, 0}, + {"(*RGBA).Bounds", Method, 0}, + {"(*RGBA).ColorModel", Method, 0}, + {"(*RGBA).Opaque", Method, 0}, + {"(*RGBA).PixOffset", Method, 0}, + {"(*RGBA).RGBA64At", Method, 17}, + {"(*RGBA).RGBAAt", Method, 4}, + {"(*RGBA).Set", Method, 0}, + {"(*RGBA).SetRGBA", Method, 0}, + {"(*RGBA).SetRGBA64", Method, 17}, + {"(*RGBA).SubImage", Method, 0}, + {"(*RGBA64).At", Method, 0}, + {"(*RGBA64).Bounds", Method, 0}, + {"(*RGBA64).ColorModel", Method, 0}, + {"(*RGBA64).Opaque", Method, 0}, + {"(*RGBA64).PixOffset", Method, 0}, + {"(*RGBA64).RGBA64At", Method, 4}, + {"(*RGBA64).Set", Method, 0}, + {"(*RGBA64).SetRGBA64", Method, 0}, + {"(*RGBA64).SubImage", Method, 0}, + {"(*Uniform).At", Method, 0}, + {"(*Uniform).Bounds", Method, 0}, + {"(*Uniform).ColorModel", Method, 0}, + {"(*Uniform).Convert", Method, 0}, + {"(*Uniform).Opaque", Method, 0}, + {"(*Uniform).RGBA", Method, 0}, + {"(*Uniform).RGBA64At", Method, 17}, + {"(*YCbCr).At", Method, 0}, + {"(*YCbCr).Bounds", Method, 0}, + {"(*YCbCr).COffset", Method, 0}, + {"(*YCbCr).ColorModel", Method, 0}, + {"(*YCbCr).Opaque", Method, 0}, + {"(*YCbCr).RGBA64At", Method, 17}, + {"(*YCbCr).SubImage", Method, 0}, + {"(*YCbCr).YCbCrAt", Method, 4}, + {"(*YCbCr).YOffset", Method, 0}, + {"(Point).Add", Method, 0}, + {"(Point).Div", Method, 0}, + {"(Point).Eq", Method, 0}, + {"(Point).In", Method, 0}, + {"(Point).Mod", Method, 0}, + {"(Point).Mul", Method, 0}, + {"(Point).String", Method, 0}, + {"(Point).Sub", Method, 0}, + {"(Rectangle).Add", Method, 0}, + {"(Rectangle).At", Method, 5}, + {"(Rectangle).Bounds", Method, 5}, + {"(Rectangle).Canon", Method, 0}, + {"(Rectangle).ColorModel", Method, 5}, + {"(Rectangle).Dx", Method, 0}, + {"(Rectangle).Dy", Method, 0}, + {"(Rectangle).Empty", Method, 0}, + {"(Rectangle).Eq", Method, 0}, + {"(Rectangle).In", Method, 0}, + {"(Rectangle).Inset", Method, 0}, + {"(Rectangle).Intersect", Method, 0}, + {"(Rectangle).Overlaps", Method, 0}, + {"(Rectangle).RGBA64At", Method, 17}, + {"(Rectangle).Size", Method, 0}, + {"(Rectangle).String", Method, 0}, + {"(Rectangle).Sub", Method, 0}, + {"(Rectangle).Union", Method, 0}, + {"(YCbCrSubsampleRatio).String", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.Pix", Field, 0}, + {"Alpha.Rect", Field, 0}, + {"Alpha.Stride", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.Pix", Field, 0}, + {"Alpha16.Rect", Field, 0}, + {"Alpha16.Stride", Field, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.Pix", Field, 5}, + {"CMYK.Rect", Field, 5}, + {"CMYK.Stride", Field, 5}, + {"Config", Type, 0}, + {"Config.ColorModel", Field, 0}, + {"Config.Height", Field, 0}, + {"Config.Width", Field, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"ErrFormat", Var, 0}, + {"Gray", Type, 0}, + {"Gray.Pix", Field, 0}, + 
{"Gray.Rect", Field, 0}, + {"Gray.Stride", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Pix", Field, 0}, + {"Gray16.Rect", Field, 0}, + {"Gray16.Stride", Field, 0}, + {"Image", Type, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.Pix", Field, 0}, + {"NRGBA.Rect", Field, 0}, + {"NRGBA.Stride", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.Pix", Field, 0}, + {"NRGBA64.Rect", Field, 0}, + {"NRGBA64.Stride", Field, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.AStride", Field, 6}, + {"NYCbCrA.YCbCr", Field, 6}, + {"NewAlpha", Func, 0}, + {"NewAlpha16", Func, 0}, + {"NewCMYK", Func, 5}, + {"NewGray", Func, 0}, + {"NewGray16", Func, 0}, + {"NewNRGBA", Func, 0}, + {"NewNRGBA64", Func, 0}, + {"NewNYCbCrA", Func, 6}, + {"NewPaletted", Func, 0}, + {"NewRGBA", Func, 0}, + {"NewRGBA64", Func, 0}, + {"NewUniform", Func, 0}, + {"NewYCbCr", Func, 0}, + {"Opaque", Var, 0}, + {"Paletted", Type, 0}, + {"Paletted.Palette", Field, 0}, + {"Paletted.Pix", Field, 0}, + {"Paletted.Rect", Field, 0}, + {"Paletted.Stride", Field, 0}, + {"PalettedImage", Type, 0}, + {"Point", Type, 0}, + {"Point.X", Field, 0}, + {"Point.Y", Field, 0}, + {"Pt", Func, 0}, + {"RGBA", Type, 0}, + {"RGBA.Pix", Field, 0}, + {"RGBA.Rect", Field, 0}, + {"RGBA.Stride", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.Pix", Field, 0}, + {"RGBA64.Rect", Field, 0}, + {"RGBA64.Stride", Field, 0}, + {"RGBA64Image", Type, 17}, + {"Rect", Func, 0}, + {"Rectangle", Type, 0}, + {"Rectangle.Max", Field, 0}, + {"Rectangle.Min", Field, 0}, + {"RegisterFormat", Func, 0}, + {"Transparent", Var, 0}, + {"Uniform", Type, 0}, + {"Uniform.C", Field, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.CStride", Field, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Rect", Field, 0}, + {"YCbCr.SubsampleRatio", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCr.YStride", Field, 0}, + {"YCbCrSubsampleRatio", Type, 0}, + {"YCbCrSubsampleRatio410", Const, 5}, + {"YCbCrSubsampleRatio411", Const, 5}, + {"YCbCrSubsampleRatio420", Const, 0}, + {"YCbCrSubsampleRatio422", Const, 0}, + {"YCbCrSubsampleRatio440", Const, 1}, + {"YCbCrSubsampleRatio444", Const, 0}, + {"ZP", Var, 0}, + {"ZR", Var, 0}, + }, + "image/color": { + {"(Alpha).RGBA", Method, 0}, + {"(Alpha16).RGBA", Method, 0}, + {"(CMYK).RGBA", Method, 5}, + {"(Gray).RGBA", Method, 0}, + {"(Gray16).RGBA", Method, 0}, + {"(NRGBA).RGBA", Method, 0}, + {"(NRGBA64).RGBA", Method, 0}, + {"(NYCbCrA).RGBA", Method, 6}, + {"(Palette).Convert", Method, 0}, + {"(Palette).Index", Method, 0}, + {"(RGBA).RGBA", Method, 0}, + {"(RGBA64).RGBA", Method, 0}, + {"(YCbCr).RGBA", Method, 0}, + {"Alpha", Type, 0}, + {"Alpha.A", Field, 0}, + {"Alpha16", Type, 0}, + {"Alpha16.A", Field, 0}, + {"Alpha16Model", Var, 0}, + {"AlphaModel", Var, 0}, + {"Black", Var, 0}, + {"CMYK", Type, 5}, + {"CMYK.C", Field, 5}, + {"CMYK.K", Field, 5}, + {"CMYK.M", Field, 5}, + {"CMYK.Y", Field, 5}, + {"CMYKModel", Var, 5}, + {"CMYKToRGB", Func, 5}, + {"Color", Type, 0}, + {"Gray", Type, 0}, + {"Gray.Y", Field, 0}, + {"Gray16", Type, 0}, + {"Gray16.Y", Field, 0}, + {"Gray16Model", Var, 0}, + {"GrayModel", Var, 0}, + {"Model", Type, 0}, + {"ModelFunc", Func, 0}, + {"NRGBA", Type, 0}, + {"NRGBA.A", Field, 0}, + {"NRGBA.B", Field, 0}, + {"NRGBA.G", Field, 0}, + {"NRGBA.R", Field, 0}, + {"NRGBA64", Type, 0}, + {"NRGBA64.A", Field, 0}, + {"NRGBA64.B", Field, 0}, + {"NRGBA64.G", Field, 0}, + {"NRGBA64.R", Field, 0}, + {"NRGBA64Model", Var, 0}, + {"NRGBAModel", Var, 0}, + {"NYCbCrA", Type, 6}, + {"NYCbCrA.A", Field, 6}, + {"NYCbCrA.YCbCr", 
Field, 6}, + {"NYCbCrAModel", Var, 6}, + {"Opaque", Var, 0}, + {"Palette", Type, 0}, + {"RGBA", Type, 0}, + {"RGBA.A", Field, 0}, + {"RGBA.B", Field, 0}, + {"RGBA.G", Field, 0}, + {"RGBA.R", Field, 0}, + {"RGBA64", Type, 0}, + {"RGBA64.A", Field, 0}, + {"RGBA64.B", Field, 0}, + {"RGBA64.G", Field, 0}, + {"RGBA64.R", Field, 0}, + {"RGBA64Model", Var, 0}, + {"RGBAModel", Var, 0}, + {"RGBToCMYK", Func, 5}, + {"RGBToYCbCr", Func, 0}, + {"Transparent", Var, 0}, + {"White", Var, 0}, + {"YCbCr", Type, 0}, + {"YCbCr.Cb", Field, 0}, + {"YCbCr.Cr", Field, 0}, + {"YCbCr.Y", Field, 0}, + {"YCbCrModel", Var, 0}, + {"YCbCrToRGB", Func, 0}, + }, + "image/color/palette": { + {"Plan9", Var, 2}, + {"WebSafe", Var, 2}, + }, + "image/draw": { + {"(Op).Draw", Method, 2}, + {"Draw", Func, 0}, + {"DrawMask", Func, 0}, + {"Drawer", Type, 2}, + {"FloydSteinberg", Var, 2}, + {"Image", Type, 0}, + {"Op", Type, 0}, + {"Over", Const, 0}, + {"Quantizer", Type, 2}, + {"RGBA64Image", Type, 17}, + {"Src", Const, 0}, + }, + "image/gif": { + {"Decode", Func, 0}, + {"DecodeAll", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DisposalBackground", Const, 5}, + {"DisposalNone", Const, 5}, + {"DisposalPrevious", Const, 5}, + {"Encode", Func, 2}, + {"EncodeAll", Func, 2}, + {"GIF", Type, 0}, + {"GIF.BackgroundIndex", Field, 5}, + {"GIF.Config", Field, 5}, + {"GIF.Delay", Field, 0}, + {"GIF.Disposal", Field, 5}, + {"GIF.Image", Field, 0}, + {"GIF.LoopCount", Field, 0}, + {"Options", Type, 2}, + {"Options.Drawer", Field, 2}, + {"Options.NumColors", Field, 2}, + {"Options.Quantizer", Field, 2}, + }, + "image/jpeg": { + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultQuality", Const, 0}, + {"Encode", Func, 0}, + {"FormatError", Type, 0}, + {"Options", Type, 0}, + {"Options.Quality", Field, 0}, + {"Reader", Type, 0}, + {"UnsupportedError", Type, 0}, + }, + "image/png": { + {"(*Encoder).Encode", Method, 4}, + {"(FormatError).Error", Method, 0}, + {"(UnsupportedError).Error", Method, 0}, + {"BestCompression", Const, 4}, + {"BestSpeed", Const, 4}, + {"CompressionLevel", Type, 4}, + {"Decode", Func, 0}, + {"DecodeConfig", Func, 0}, + {"DefaultCompression", Const, 4}, + {"Encode", Func, 0}, + {"Encoder", Type, 4}, + {"Encoder.BufferPool", Field, 9}, + {"Encoder.CompressionLevel", Field, 4}, + {"EncoderBuffer", Type, 9}, + {"EncoderBufferPool", Type, 9}, + {"FormatError", Type, 0}, + {"NoCompression", Const, 4}, + {"UnsupportedError", Type, 0}, + }, + "index/suffixarray": { + {"(*Index).Bytes", Method, 0}, + {"(*Index).FindAllIndex", Method, 0}, + {"(*Index).Lookup", Method, 0}, + {"(*Index).Read", Method, 0}, + {"(*Index).Write", Method, 0}, + {"Index", Type, 0}, + {"New", Func, 0}, + }, + "io": { + {"(*LimitedReader).Read", Method, 0}, + {"(*OffsetWriter).Seek", Method, 20}, + {"(*OffsetWriter).Write", Method, 20}, + {"(*OffsetWriter).WriteAt", Method, 20}, + {"(*PipeReader).Close", Method, 0}, + {"(*PipeReader).CloseWithError", Method, 0}, + {"(*PipeReader).Read", Method, 0}, + {"(*PipeWriter).Close", Method, 0}, + {"(*PipeWriter).CloseWithError", Method, 0}, + {"(*PipeWriter).Write", Method, 0}, + {"(*SectionReader).Outer", Method, 22}, + {"(*SectionReader).Read", Method, 0}, + {"(*SectionReader).ReadAt", Method, 0}, + {"(*SectionReader).Seek", Method, 0}, + {"(*SectionReader).Size", Method, 0}, + {"ByteReader", Type, 0}, + {"ByteScanner", Type, 0}, + {"ByteWriter", Type, 1}, + {"Closer", Type, 0}, + {"Copy", Func, 0}, + {"CopyBuffer", Func, 5}, + 
{"CopyN", Func, 0}, + {"Discard", Var, 16}, + {"EOF", Var, 0}, + {"ErrClosedPipe", Var, 0}, + {"ErrNoProgress", Var, 1}, + {"ErrShortBuffer", Var, 0}, + {"ErrShortWrite", Var, 0}, + {"ErrUnexpectedEOF", Var, 0}, + {"LimitReader", Func, 0}, + {"LimitedReader", Type, 0}, + {"LimitedReader.N", Field, 0}, + {"LimitedReader.R", Field, 0}, + {"MultiReader", Func, 0}, + {"MultiWriter", Func, 0}, + {"NewOffsetWriter", Func, 20}, + {"NewSectionReader", Func, 0}, + {"NopCloser", Func, 16}, + {"OffsetWriter", Type, 20}, + {"Pipe", Func, 0}, + {"PipeReader", Type, 0}, + {"PipeWriter", Type, 0}, + {"ReadAll", Func, 16}, + {"ReadAtLeast", Func, 0}, + {"ReadCloser", Type, 0}, + {"ReadFull", Func, 0}, + {"ReadSeekCloser", Type, 16}, + {"ReadSeeker", Type, 0}, + {"ReadWriteCloser", Type, 0}, + {"ReadWriteSeeker", Type, 0}, + {"ReadWriter", Type, 0}, + {"Reader", Type, 0}, + {"ReaderAt", Type, 0}, + {"ReaderFrom", Type, 0}, + {"RuneReader", Type, 0}, + {"RuneScanner", Type, 0}, + {"SectionReader", Type, 0}, + {"SeekCurrent", Const, 7}, + {"SeekEnd", Const, 7}, + {"SeekStart", Const, 7}, + {"Seeker", Type, 0}, + {"StringWriter", Type, 12}, + {"TeeReader", Func, 0}, + {"WriteCloser", Type, 0}, + {"WriteSeeker", Type, 0}, + {"WriteString", Func, 0}, + {"Writer", Type, 0}, + {"WriterAt", Type, 0}, + {"WriterTo", Type, 0}, + }, + "io/fs": { + {"(*PathError).Error", Method, 16}, + {"(*PathError).Timeout", Method, 16}, + {"(*PathError).Unwrap", Method, 16}, + {"(FileMode).IsDir", Method, 16}, + {"(FileMode).IsRegular", Method, 16}, + {"(FileMode).Perm", Method, 16}, + {"(FileMode).String", Method, 16}, + {"(FileMode).Type", Method, 16}, + {"DirEntry", Type, 16}, + {"ErrClosed", Var, 16}, + {"ErrExist", Var, 16}, + {"ErrInvalid", Var, 16}, + {"ErrNotExist", Var, 16}, + {"ErrPermission", Var, 16}, + {"FS", Type, 16}, + {"File", Type, 16}, + {"FileInfo", Type, 16}, + {"FileInfoToDirEntry", Func, 17}, + {"FileMode", Type, 16}, + {"FormatDirEntry", Func, 21}, + {"FormatFileInfo", Func, 21}, + {"Glob", Func, 16}, + {"GlobFS", Type, 16}, + {"ModeAppend", Const, 16}, + {"ModeCharDevice", Const, 16}, + {"ModeDevice", Const, 16}, + {"ModeDir", Const, 16}, + {"ModeExclusive", Const, 16}, + {"ModeIrregular", Const, 16}, + {"ModeNamedPipe", Const, 16}, + {"ModePerm", Const, 16}, + {"ModeSetgid", Const, 16}, + {"ModeSetuid", Const, 16}, + {"ModeSocket", Const, 16}, + {"ModeSticky", Const, 16}, + {"ModeSymlink", Const, 16}, + {"ModeTemporary", Const, 16}, + {"ModeType", Const, 16}, + {"PathError", Type, 16}, + {"PathError.Err", Field, 16}, + {"PathError.Op", Field, 16}, + {"PathError.Path", Field, 16}, + {"ReadDir", Func, 16}, + {"ReadDirFS", Type, 16}, + {"ReadDirFile", Type, 16}, + {"ReadFile", Func, 16}, + {"ReadFileFS", Type, 16}, + {"SkipAll", Var, 20}, + {"SkipDir", Var, 16}, + {"Stat", Func, 16}, + {"StatFS", Type, 16}, + {"Sub", Func, 16}, + {"SubFS", Type, 16}, + {"ValidPath", Func, 16}, + {"WalkDir", Func, 16}, + {"WalkDirFunc", Type, 16}, + }, + "io/ioutil": { + {"Discard", Var, 0}, + {"NopCloser", Func, 0}, + {"ReadAll", Func, 0}, + {"ReadDir", Func, 0}, + {"ReadFile", Func, 0}, + {"TempDir", Func, 0}, + {"TempFile", Func, 0}, + {"WriteFile", Func, 0}, + }, + "log": { + {"(*Logger).Fatal", Method, 0}, + {"(*Logger).Fatalf", Method, 0}, + {"(*Logger).Fatalln", Method, 0}, + {"(*Logger).Flags", Method, 0}, + {"(*Logger).Output", Method, 0}, + {"(*Logger).Panic", Method, 0}, + {"(*Logger).Panicf", Method, 0}, + {"(*Logger).Panicln", Method, 0}, + {"(*Logger).Prefix", Method, 0}, + {"(*Logger).Print", Method, 0}, + 
{"(*Logger).Printf", Method, 0}, + {"(*Logger).Println", Method, 0}, + {"(*Logger).SetFlags", Method, 0}, + {"(*Logger).SetOutput", Method, 5}, + {"(*Logger).SetPrefix", Method, 0}, + {"(*Logger).Writer", Method, 12}, + {"Default", Func, 16}, + {"Fatal", Func, 0}, + {"Fatalf", Func, 0}, + {"Fatalln", Func, 0}, + {"Flags", Func, 0}, + {"LUTC", Const, 5}, + {"Ldate", Const, 0}, + {"Llongfile", Const, 0}, + {"Lmicroseconds", Const, 0}, + {"Lmsgprefix", Const, 14}, + {"Logger", Type, 0}, + {"Lshortfile", Const, 0}, + {"LstdFlags", Const, 0}, + {"Ltime", Const, 0}, + {"New", Func, 0}, + {"Output", Func, 5}, + {"Panic", Func, 0}, + {"Panicf", Func, 0}, + {"Panicln", Func, 0}, + {"Prefix", Func, 0}, + {"Print", Func, 0}, + {"Printf", Func, 0}, + {"Println", Func, 0}, + {"SetFlags", Func, 0}, + {"SetOutput", Func, 0}, + {"SetPrefix", Func, 0}, + {"Writer", Func, 13}, + }, + "log/slog": { + {"(*JSONHandler).Enabled", Method, 21}, + {"(*JSONHandler).Handle", Method, 21}, + {"(*JSONHandler).WithAttrs", Method, 21}, + {"(*JSONHandler).WithGroup", Method, 21}, + {"(*Level).UnmarshalJSON", Method, 21}, + {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).Level", Method, 21}, + {"(*LevelVar).MarshalText", Method, 21}, + {"(*LevelVar).Set", Method, 21}, + {"(*LevelVar).String", Method, 21}, + {"(*LevelVar).UnmarshalText", Method, 21}, + {"(*Logger).Debug", Method, 21}, + {"(*Logger).DebugContext", Method, 21}, + {"(*Logger).Enabled", Method, 21}, + {"(*Logger).Error", Method, 21}, + {"(*Logger).ErrorContext", Method, 21}, + {"(*Logger).Handler", Method, 21}, + {"(*Logger).Info", Method, 21}, + {"(*Logger).InfoContext", Method, 21}, + {"(*Logger).Log", Method, 21}, + {"(*Logger).LogAttrs", Method, 21}, + {"(*Logger).Warn", Method, 21}, + {"(*Logger).WarnContext", Method, 21}, + {"(*Logger).With", Method, 21}, + {"(*Logger).WithGroup", Method, 21}, + {"(*Record).Add", Method, 21}, + {"(*Record).AddAttrs", Method, 21}, + {"(*TextHandler).Enabled", Method, 21}, + {"(*TextHandler).Handle", Method, 21}, + {"(*TextHandler).WithAttrs", Method, 21}, + {"(*TextHandler).WithGroup", Method, 21}, + {"(Attr).Equal", Method, 21}, + {"(Attr).String", Method, 21}, + {"(Kind).String", Method, 21}, + {"(Level).Level", Method, 21}, + {"(Level).MarshalJSON", Method, 21}, + {"(Level).MarshalText", Method, 21}, + {"(Level).String", Method, 21}, + {"(Record).Attrs", Method, 21}, + {"(Record).Clone", Method, 21}, + {"(Record).NumAttrs", Method, 21}, + {"(Value).Any", Method, 21}, + {"(Value).Bool", Method, 21}, + {"(Value).Duration", Method, 21}, + {"(Value).Equal", Method, 21}, + {"(Value).Float64", Method, 21}, + {"(Value).Group", Method, 21}, + {"(Value).Int64", Method, 21}, + {"(Value).Kind", Method, 21}, + {"(Value).LogValuer", Method, 21}, + {"(Value).Resolve", Method, 21}, + {"(Value).String", Method, 21}, + {"(Value).Time", Method, 21}, + {"(Value).Uint64", Method, 21}, + {"Any", Func, 21}, + {"AnyValue", Func, 21}, + {"Attr", Type, 21}, + {"Attr.Key", Field, 21}, + {"Attr.Value", Field, 21}, + {"Bool", Func, 21}, + {"BoolValue", Func, 21}, + {"Debug", Func, 21}, + {"DebugContext", Func, 21}, + {"Default", Func, 21}, + {"Duration", Func, 21}, + {"DurationValue", Func, 21}, + {"Error", Func, 21}, + {"ErrorContext", Func, 21}, + {"Float64", Func, 21}, + {"Float64Value", Func, 21}, + {"Group", Func, 21}, + {"GroupValue", Func, 21}, + {"Handler", Type, 21}, + {"HandlerOptions", Type, 21}, + {"HandlerOptions.AddSource", Field, 21}, + {"HandlerOptions.Level", Field, 21}, + {"HandlerOptions.ReplaceAttr", Field, 21}, + 
{"Info", Func, 21}, + {"InfoContext", Func, 21}, + {"Int", Func, 21}, + {"Int64", Func, 21}, + {"Int64Value", Func, 21}, + {"IntValue", Func, 21}, + {"JSONHandler", Type, 21}, + {"Kind", Type, 21}, + {"KindAny", Const, 21}, + {"KindBool", Const, 21}, + {"KindDuration", Const, 21}, + {"KindFloat64", Const, 21}, + {"KindGroup", Const, 21}, + {"KindInt64", Const, 21}, + {"KindLogValuer", Const, 21}, + {"KindString", Const, 21}, + {"KindTime", Const, 21}, + {"KindUint64", Const, 21}, + {"Level", Type, 21}, + {"LevelDebug", Const, 21}, + {"LevelError", Const, 21}, + {"LevelInfo", Const, 21}, + {"LevelKey", Const, 21}, + {"LevelVar", Type, 21}, + {"LevelWarn", Const, 21}, + {"Leveler", Type, 21}, + {"Log", Func, 21}, + {"LogAttrs", Func, 21}, + {"LogValuer", Type, 21}, + {"Logger", Type, 21}, + {"MessageKey", Const, 21}, + {"New", Func, 21}, + {"NewJSONHandler", Func, 21}, + {"NewLogLogger", Func, 21}, + {"NewRecord", Func, 21}, + {"NewTextHandler", Func, 21}, + {"Record", Type, 21}, + {"Record.Level", Field, 21}, + {"Record.Message", Field, 21}, + {"Record.PC", Field, 21}, + {"Record.Time", Field, 21}, + {"SetDefault", Func, 21}, + {"SetLogLoggerLevel", Func, 22}, + {"Source", Type, 21}, + {"Source.File", Field, 21}, + {"Source.Function", Field, 21}, + {"Source.Line", Field, 21}, + {"SourceKey", Const, 21}, + {"String", Func, 21}, + {"StringValue", Func, 21}, + {"TextHandler", Type, 21}, + {"Time", Func, 21}, + {"TimeKey", Const, 21}, + {"TimeValue", Func, 21}, + {"Uint64", Func, 21}, + {"Uint64Value", Func, 21}, + {"Value", Type, 21}, + {"Warn", Func, 21}, + {"WarnContext", Func, 21}, + {"With", Func, 21}, + }, + "log/syslog": { + {"(*Writer).Alert", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).Crit", Method, 0}, + {"(*Writer).Debug", Method, 0}, + {"(*Writer).Emerg", Method, 0}, + {"(*Writer).Err", Method, 0}, + {"(*Writer).Info", Method, 0}, + {"(*Writer).Notice", Method, 0}, + {"(*Writer).Warning", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"Dial", Func, 0}, + {"LOG_ALERT", Const, 0}, + {"LOG_AUTH", Const, 1}, + {"LOG_AUTHPRIV", Const, 1}, + {"LOG_CRIT", Const, 0}, + {"LOG_CRON", Const, 1}, + {"LOG_DAEMON", Const, 1}, + {"LOG_DEBUG", Const, 0}, + {"LOG_EMERG", Const, 0}, + {"LOG_ERR", Const, 0}, + {"LOG_FTP", Const, 1}, + {"LOG_INFO", Const, 0}, + {"LOG_KERN", Const, 1}, + {"LOG_LOCAL0", Const, 1}, + {"LOG_LOCAL1", Const, 1}, + {"LOG_LOCAL2", Const, 1}, + {"LOG_LOCAL3", Const, 1}, + {"LOG_LOCAL4", Const, 1}, + {"LOG_LOCAL5", Const, 1}, + {"LOG_LOCAL6", Const, 1}, + {"LOG_LOCAL7", Const, 1}, + {"LOG_LPR", Const, 1}, + {"LOG_MAIL", Const, 1}, + {"LOG_NEWS", Const, 1}, + {"LOG_NOTICE", Const, 0}, + {"LOG_SYSLOG", Const, 1}, + {"LOG_USER", Const, 1}, + {"LOG_UUCP", Const, 1}, + {"LOG_WARNING", Const, 0}, + {"New", Func, 0}, + {"NewLogger", Func, 0}, + {"Priority", Type, 0}, + {"Writer", Type, 0}, + }, + "maps": { + {"Clone", Func, 21}, + {"Copy", Func, 21}, + {"DeleteFunc", Func, 21}, + {"Equal", Func, 21}, + {"EqualFunc", Func, 21}, + }, + "math": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atan2", Func, 0}, + {"Atanh", Func, 0}, + {"Cbrt", Func, 0}, + {"Ceil", Func, 0}, + {"Copysign", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Dim", Func, 0}, + {"E", Const, 0}, + {"Erf", Func, 0}, + {"Erfc", Func, 0}, + {"Erfcinv", Func, 10}, + {"Erfinv", Func, 10}, + {"Exp", Func, 0}, + {"Exp2", Func, 0}, + {"Expm1", Func, 0}, + {"FMA", Func, 14}, + {"Float32bits", Func, 0}, + 
{"Float32frombits", Func, 0}, + {"Float64bits", Func, 0}, + {"Float64frombits", Func, 0}, + {"Floor", Func, 0}, + {"Frexp", Func, 0}, + {"Gamma", Func, 0}, + {"Hypot", Func, 0}, + {"Ilogb", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"J0", Func, 0}, + {"J1", Func, 0}, + {"Jn", Func, 0}, + {"Ldexp", Func, 0}, + {"Lgamma", Func, 0}, + {"Ln10", Const, 0}, + {"Ln2", Const, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"Log10E", Const, 0}, + {"Log1p", Func, 0}, + {"Log2", Func, 0}, + {"Log2E", Const, 0}, + {"Logb", Func, 0}, + {"Max", Func, 0}, + {"MaxFloat32", Const, 0}, + {"MaxFloat64", Const, 0}, + {"MaxInt", Const, 17}, + {"MaxInt16", Const, 0}, + {"MaxInt32", Const, 0}, + {"MaxInt64", Const, 0}, + {"MaxInt8", Const, 0}, + {"MaxUint", Const, 17}, + {"MaxUint16", Const, 0}, + {"MaxUint32", Const, 0}, + {"MaxUint64", Const, 0}, + {"MaxUint8", Const, 0}, + {"Min", Func, 0}, + {"MinInt", Const, 17}, + {"MinInt16", Const, 0}, + {"MinInt32", Const, 0}, + {"MinInt64", Const, 0}, + {"MinInt8", Const, 0}, + {"Mod", Func, 0}, + {"Modf", Func, 0}, + {"NaN", Func, 0}, + {"Nextafter", Func, 0}, + {"Nextafter32", Func, 4}, + {"Phi", Const, 0}, + {"Pi", Const, 0}, + {"Pow", Func, 0}, + {"Pow10", Func, 0}, + {"Remainder", Func, 0}, + {"Round", Func, 10}, + {"RoundToEven", Func, 10}, + {"Signbit", Func, 0}, + {"Sin", Func, 0}, + {"Sincos", Func, 0}, + {"Sinh", Func, 0}, + {"SmallestNonzeroFloat32", Const, 0}, + {"SmallestNonzeroFloat64", Const, 0}, + {"Sqrt", Func, 0}, + {"Sqrt2", Const, 0}, + {"SqrtE", Const, 0}, + {"SqrtPhi", Const, 0}, + {"SqrtPi", Const, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + {"Trunc", Func, 0}, + {"Y0", Func, 0}, + {"Y1", Func, 0}, + {"Yn", Func, 0}, + }, + "math/big": { + {"(*Float).Abs", Method, 5}, + {"(*Float).Acc", Method, 5}, + {"(*Float).Add", Method, 5}, + {"(*Float).Append", Method, 5}, + {"(*Float).Cmp", Method, 5}, + {"(*Float).Copy", Method, 5}, + {"(*Float).Float32", Method, 5}, + {"(*Float).Float64", Method, 5}, + {"(*Float).Format", Method, 5}, + {"(*Float).GobDecode", Method, 7}, + {"(*Float).GobEncode", Method, 7}, + {"(*Float).Int", Method, 5}, + {"(*Float).Int64", Method, 5}, + {"(*Float).IsInf", Method, 5}, + {"(*Float).IsInt", Method, 5}, + {"(*Float).MantExp", Method, 5}, + {"(*Float).MarshalText", Method, 6}, + {"(*Float).MinPrec", Method, 5}, + {"(*Float).Mode", Method, 5}, + {"(*Float).Mul", Method, 5}, + {"(*Float).Neg", Method, 5}, + {"(*Float).Parse", Method, 5}, + {"(*Float).Prec", Method, 5}, + {"(*Float).Quo", Method, 5}, + {"(*Float).Rat", Method, 5}, + {"(*Float).Scan", Method, 8}, + {"(*Float).Set", Method, 5}, + {"(*Float).SetFloat64", Method, 5}, + {"(*Float).SetInf", Method, 5}, + {"(*Float).SetInt", Method, 5}, + {"(*Float).SetInt64", Method, 5}, + {"(*Float).SetMantExp", Method, 5}, + {"(*Float).SetMode", Method, 5}, + {"(*Float).SetPrec", Method, 5}, + {"(*Float).SetRat", Method, 5}, + {"(*Float).SetString", Method, 5}, + {"(*Float).SetUint64", Method, 5}, + {"(*Float).Sign", Method, 5}, + {"(*Float).Signbit", Method, 5}, + {"(*Float).Sqrt", Method, 10}, + {"(*Float).String", Method, 5}, + {"(*Float).Sub", Method, 5}, + {"(*Float).Text", Method, 5}, + {"(*Float).Uint64", Method, 5}, + {"(*Float).UnmarshalText", Method, 6}, + {"(*Int).Abs", Method, 0}, + {"(*Int).Add", Method, 0}, + {"(*Int).And", Method, 0}, + {"(*Int).AndNot", Method, 0}, + {"(*Int).Append", Method, 6}, + {"(*Int).Binomial", Method, 0}, + {"(*Int).Bit", Method, 0}, + {"(*Int).BitLen", Method, 0}, + {"(*Int).Bits", Method, 0}, + 
{"(*Int).Bytes", Method, 0}, + {"(*Int).Cmp", Method, 0}, + {"(*Int).CmpAbs", Method, 10}, + {"(*Int).Div", Method, 0}, + {"(*Int).DivMod", Method, 0}, + {"(*Int).Exp", Method, 0}, + {"(*Int).FillBytes", Method, 15}, + {"(*Int).Float64", Method, 21}, + {"(*Int).Format", Method, 0}, + {"(*Int).GCD", Method, 0}, + {"(*Int).GobDecode", Method, 0}, + {"(*Int).GobEncode", Method, 0}, + {"(*Int).Int64", Method, 0}, + {"(*Int).IsInt64", Method, 9}, + {"(*Int).IsUint64", Method, 9}, + {"(*Int).Lsh", Method, 0}, + {"(*Int).MarshalJSON", Method, 1}, + {"(*Int).MarshalText", Method, 3}, + {"(*Int).Mod", Method, 0}, + {"(*Int).ModInverse", Method, 0}, + {"(*Int).ModSqrt", Method, 5}, + {"(*Int).Mul", Method, 0}, + {"(*Int).MulRange", Method, 0}, + {"(*Int).Neg", Method, 0}, + {"(*Int).Not", Method, 0}, + {"(*Int).Or", Method, 0}, + {"(*Int).ProbablyPrime", Method, 0}, + {"(*Int).Quo", Method, 0}, + {"(*Int).QuoRem", Method, 0}, + {"(*Int).Rand", Method, 0}, + {"(*Int).Rem", Method, 0}, + {"(*Int).Rsh", Method, 0}, + {"(*Int).Scan", Method, 0}, + {"(*Int).Set", Method, 0}, + {"(*Int).SetBit", Method, 0}, + {"(*Int).SetBits", Method, 0}, + {"(*Int).SetBytes", Method, 0}, + {"(*Int).SetInt64", Method, 0}, + {"(*Int).SetString", Method, 0}, + {"(*Int).SetUint64", Method, 1}, + {"(*Int).Sign", Method, 0}, + {"(*Int).Sqrt", Method, 8}, + {"(*Int).String", Method, 0}, + {"(*Int).Sub", Method, 0}, + {"(*Int).Text", Method, 6}, + {"(*Int).TrailingZeroBits", Method, 13}, + {"(*Int).Uint64", Method, 1}, + {"(*Int).UnmarshalJSON", Method, 1}, + {"(*Int).UnmarshalText", Method, 3}, + {"(*Int).Xor", Method, 0}, + {"(*Rat).Abs", Method, 0}, + {"(*Rat).Add", Method, 0}, + {"(*Rat).Cmp", Method, 0}, + {"(*Rat).Denom", Method, 0}, + {"(*Rat).Float32", Method, 4}, + {"(*Rat).Float64", Method, 1}, + {"(*Rat).FloatPrec", Method, 22}, + {"(*Rat).FloatString", Method, 0}, + {"(*Rat).GobDecode", Method, 0}, + {"(*Rat).GobEncode", Method, 0}, + {"(*Rat).Inv", Method, 0}, + {"(*Rat).IsInt", Method, 0}, + {"(*Rat).MarshalText", Method, 3}, + {"(*Rat).Mul", Method, 0}, + {"(*Rat).Neg", Method, 0}, + {"(*Rat).Num", Method, 0}, + {"(*Rat).Quo", Method, 0}, + {"(*Rat).RatString", Method, 0}, + {"(*Rat).Scan", Method, 0}, + {"(*Rat).Set", Method, 0}, + {"(*Rat).SetFloat64", Method, 1}, + {"(*Rat).SetFrac", Method, 0}, + {"(*Rat).SetFrac64", Method, 0}, + {"(*Rat).SetInt", Method, 0}, + {"(*Rat).SetInt64", Method, 0}, + {"(*Rat).SetString", Method, 0}, + {"(*Rat).SetUint64", Method, 13}, + {"(*Rat).Sign", Method, 0}, + {"(*Rat).String", Method, 0}, + {"(*Rat).Sub", Method, 0}, + {"(*Rat).UnmarshalText", Method, 3}, + {"(Accuracy).String", Method, 5}, + {"(ErrNaN).Error", Method, 5}, + {"(RoundingMode).String", Method, 5}, + {"Above", Const, 5}, + {"Accuracy", Type, 5}, + {"AwayFromZero", Const, 5}, + {"Below", Const, 5}, + {"ErrNaN", Type, 5}, + {"Exact", Const, 5}, + {"Float", Type, 5}, + {"Int", Type, 0}, + {"Jacobi", Func, 5}, + {"MaxBase", Const, 0}, + {"MaxExp", Const, 5}, + {"MaxPrec", Const, 5}, + {"MinExp", Const, 5}, + {"NewFloat", Func, 5}, + {"NewInt", Func, 0}, + {"NewRat", Func, 0}, + {"ParseFloat", Func, 5}, + {"Rat", Type, 0}, + {"RoundingMode", Type, 5}, + {"ToNearestAway", Const, 5}, + {"ToNearestEven", Const, 5}, + {"ToNegativeInf", Const, 5}, + {"ToPositiveInf", Const, 5}, + {"ToZero", Const, 5}, + {"Word", Type, 0}, + }, + "math/bits": { + {"Add", Func, 12}, + {"Add32", Func, 12}, + {"Add64", Func, 12}, + {"Div", Func, 12}, + {"Div32", Func, 12}, + {"Div64", Func, 12}, + {"LeadingZeros", Func, 9}, + 
{"LeadingZeros16", Func, 9}, + {"LeadingZeros32", Func, 9}, + {"LeadingZeros64", Func, 9}, + {"LeadingZeros8", Func, 9}, + {"Len", Func, 9}, + {"Len16", Func, 9}, + {"Len32", Func, 9}, + {"Len64", Func, 9}, + {"Len8", Func, 9}, + {"Mul", Func, 12}, + {"Mul32", Func, 12}, + {"Mul64", Func, 12}, + {"OnesCount", Func, 9}, + {"OnesCount16", Func, 9}, + {"OnesCount32", Func, 9}, + {"OnesCount64", Func, 9}, + {"OnesCount8", Func, 9}, + {"Rem", Func, 14}, + {"Rem32", Func, 14}, + {"Rem64", Func, 14}, + {"Reverse", Func, 9}, + {"Reverse16", Func, 9}, + {"Reverse32", Func, 9}, + {"Reverse64", Func, 9}, + {"Reverse8", Func, 9}, + {"ReverseBytes", Func, 9}, + {"ReverseBytes16", Func, 9}, + {"ReverseBytes32", Func, 9}, + {"ReverseBytes64", Func, 9}, + {"RotateLeft", Func, 9}, + {"RotateLeft16", Func, 9}, + {"RotateLeft32", Func, 9}, + {"RotateLeft64", Func, 9}, + {"RotateLeft8", Func, 9}, + {"Sub", Func, 12}, + {"Sub32", Func, 12}, + {"Sub64", Func, 12}, + {"TrailingZeros", Func, 9}, + {"TrailingZeros16", Func, 9}, + {"TrailingZeros32", Func, 9}, + {"TrailingZeros64", Func, 9}, + {"TrailingZeros8", Func, 9}, + {"UintSize", Const, 9}, + }, + "math/cmplx": { + {"Abs", Func, 0}, + {"Acos", Func, 0}, + {"Acosh", Func, 0}, + {"Asin", Func, 0}, + {"Asinh", Func, 0}, + {"Atan", Func, 0}, + {"Atanh", Func, 0}, + {"Conj", Func, 0}, + {"Cos", Func, 0}, + {"Cosh", Func, 0}, + {"Cot", Func, 0}, + {"Exp", Func, 0}, + {"Inf", Func, 0}, + {"IsInf", Func, 0}, + {"IsNaN", Func, 0}, + {"Log", Func, 0}, + {"Log10", Func, 0}, + {"NaN", Func, 0}, + {"Phase", Func, 0}, + {"Polar", Func, 0}, + {"Pow", Func, 0}, + {"Rect", Func, 0}, + {"Sin", Func, 0}, + {"Sinh", Func, 0}, + {"Sqrt", Func, 0}, + {"Tan", Func, 0}, + {"Tanh", Func, 0}, + }, + "math/rand": { + {"(*Rand).ExpFloat64", Method, 0}, + {"(*Rand).Float32", Method, 0}, + {"(*Rand).Float64", Method, 0}, + {"(*Rand).Int", Method, 0}, + {"(*Rand).Int31", Method, 0}, + {"(*Rand).Int31n", Method, 0}, + {"(*Rand).Int63", Method, 0}, + {"(*Rand).Int63n", Method, 0}, + {"(*Rand).Intn", Method, 0}, + {"(*Rand).NormFloat64", Method, 0}, + {"(*Rand).Perm", Method, 0}, + {"(*Rand).Read", Method, 6}, + {"(*Rand).Seed", Method, 0}, + {"(*Rand).Shuffle", Method, 10}, + {"(*Rand).Uint32", Method, 0}, + {"(*Rand).Uint64", Method, 8}, + {"(*Zipf).Uint64", Method, 0}, + {"ExpFloat64", Func, 0}, + {"Float32", Func, 0}, + {"Float64", Func, 0}, + {"Int", Func, 0}, + {"Int31", Func, 0}, + {"Int31n", Func, 0}, + {"Int63", Func, 0}, + {"Int63n", Func, 0}, + {"Intn", Func, 0}, + {"New", Func, 0}, + {"NewSource", Func, 0}, + {"NewZipf", Func, 0}, + {"NormFloat64", Func, 0}, + {"Perm", Func, 0}, + {"Rand", Type, 0}, + {"Read", Func, 6}, + {"Seed", Func, 0}, + {"Shuffle", Func, 10}, + {"Source", Type, 0}, + {"Source64", Type, 8}, + {"Uint32", Func, 0}, + {"Uint64", Func, 8}, + {"Zipf", Type, 0}, + }, + "math/rand/v2": { + {"(*ChaCha8).MarshalBinary", Method, 22}, + {"(*ChaCha8).Seed", Method, 22}, + {"(*ChaCha8).Uint64", Method, 22}, + {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).MarshalBinary", Method, 22}, + {"(*PCG).Seed", Method, 22}, + {"(*PCG).Uint64", Method, 22}, + {"(*PCG).UnmarshalBinary", Method, 22}, + {"(*Rand).ExpFloat64", Method, 22}, + {"(*Rand).Float32", Method, 22}, + {"(*Rand).Float64", Method, 22}, + {"(*Rand).Int", Method, 22}, + {"(*Rand).Int32", Method, 22}, + {"(*Rand).Int32N", Method, 22}, + {"(*Rand).Int64", Method, 22}, + {"(*Rand).Int64N", Method, 22}, + {"(*Rand).IntN", Method, 22}, + {"(*Rand).NormFloat64", Method, 22}, + {"(*Rand).Perm", Method, 22}, + 
{"(*Rand).Shuffle", Method, 22}, + {"(*Rand).Uint32", Method, 22}, + {"(*Rand).Uint32N", Method, 22}, + {"(*Rand).Uint64", Method, 22}, + {"(*Rand).Uint64N", Method, 22}, + {"(*Rand).UintN", Method, 22}, + {"(*Zipf).Uint64", Method, 22}, + {"ChaCha8", Type, 22}, + {"ExpFloat64", Func, 22}, + {"Float32", Func, 22}, + {"Float64", Func, 22}, + {"Int", Func, 22}, + {"Int32", Func, 22}, + {"Int32N", Func, 22}, + {"Int64", Func, 22}, + {"Int64N", Func, 22}, + {"IntN", Func, 22}, + {"N", Func, 22}, + {"New", Func, 22}, + {"NewChaCha8", Func, 22}, + {"NewPCG", Func, 22}, + {"NewZipf", Func, 22}, + {"NormFloat64", Func, 22}, + {"PCG", Type, 22}, + {"Perm", Func, 22}, + {"Rand", Type, 22}, + {"Shuffle", Func, 22}, + {"Source", Type, 22}, + {"Uint32", Func, 22}, + {"Uint32N", Func, 22}, + {"Uint64", Func, 22}, + {"Uint64N", Func, 22}, + {"UintN", Func, 22}, + {"Zipf", Type, 22}, + }, + "mime": { + {"(*WordDecoder).Decode", Method, 5}, + {"(*WordDecoder).DecodeHeader", Method, 5}, + {"(WordEncoder).Encode", Method, 5}, + {"AddExtensionType", Func, 0}, + {"BEncoding", Const, 5}, + {"ErrInvalidMediaParameter", Var, 9}, + {"ExtensionsByType", Func, 5}, + {"FormatMediaType", Func, 0}, + {"ParseMediaType", Func, 0}, + {"QEncoding", Const, 5}, + {"TypeByExtension", Func, 0}, + {"WordDecoder", Type, 5}, + {"WordDecoder.CharsetReader", Field, 5}, + {"WordEncoder", Type, 5}, + }, + "mime/multipart": { + {"(*FileHeader).Open", Method, 0}, + {"(*Form).RemoveAll", Method, 0}, + {"(*Part).Close", Method, 0}, + {"(*Part).FileName", Method, 0}, + {"(*Part).FormName", Method, 0}, + {"(*Part).Read", Method, 0}, + {"(*Reader).NextPart", Method, 0}, + {"(*Reader).NextRawPart", Method, 14}, + {"(*Reader).ReadForm", Method, 0}, + {"(*Writer).Boundary", Method, 0}, + {"(*Writer).Close", Method, 0}, + {"(*Writer).CreateFormField", Method, 0}, + {"(*Writer).CreateFormFile", Method, 0}, + {"(*Writer).CreatePart", Method, 0}, + {"(*Writer).FormDataContentType", Method, 0}, + {"(*Writer).SetBoundary", Method, 1}, + {"(*Writer).WriteField", Method, 0}, + {"ErrMessageTooLarge", Var, 9}, + {"File", Type, 0}, + {"FileHeader", Type, 0}, + {"FileHeader.Filename", Field, 0}, + {"FileHeader.Header", Field, 0}, + {"FileHeader.Size", Field, 9}, + {"Form", Type, 0}, + {"Form.File", Field, 0}, + {"Form.Value", Field, 0}, + {"NewReader", Func, 0}, + {"NewWriter", Func, 0}, + {"Part", Type, 0}, + {"Part.Header", Field, 0}, + {"Reader", Type, 0}, + {"Writer", Type, 0}, + }, + "mime/quotedprintable": { + {"(*Reader).Read", Method, 5}, + {"(*Writer).Close", Method, 5}, + {"(*Writer).Write", Method, 5}, + {"NewReader", Func, 5}, + {"NewWriter", Func, 5}, + {"Reader", Type, 5}, + {"Writer", Type, 5}, + {"Writer.Binary", Field, 5}, + }, + "net": { + {"(*AddrError).Error", Method, 0}, + {"(*AddrError).Temporary", Method, 0}, + {"(*AddrError).Timeout", Method, 0}, + {"(*Buffers).Read", Method, 8}, + {"(*Buffers).WriteTo", Method, 8}, + {"(*DNSConfigError).Error", Method, 0}, + {"(*DNSConfigError).Temporary", Method, 0}, + {"(*DNSConfigError).Timeout", Method, 0}, + {"(*DNSConfigError).Unwrap", Method, 13}, + {"(*DNSError).Error", Method, 0}, + {"(*DNSError).Temporary", Method, 0}, + {"(*DNSError).Timeout", Method, 0}, + {"(*Dialer).Dial", Method, 1}, + {"(*Dialer).DialContext", Method, 7}, + {"(*Dialer).MultipathTCP", Method, 21}, + {"(*Dialer).SetMultipathTCP", Method, 21}, + {"(*IP).UnmarshalText", Method, 2}, + {"(*IPAddr).Network", Method, 0}, + {"(*IPAddr).String", Method, 0}, + {"(*IPConn).Close", Method, 0}, + {"(*IPConn).File", Method, 0}, + 
{"(*IPConn).LocalAddr", Method, 0}, + {"(*IPConn).Read", Method, 0}, + {"(*IPConn).ReadFrom", Method, 0}, + {"(*IPConn).ReadFromIP", Method, 0}, + {"(*IPConn).ReadMsgIP", Method, 1}, + {"(*IPConn).RemoteAddr", Method, 0}, + {"(*IPConn).SetDeadline", Method, 0}, + {"(*IPConn).SetReadBuffer", Method, 0}, + {"(*IPConn).SetReadDeadline", Method, 0}, + {"(*IPConn).SetWriteBuffer", Method, 0}, + {"(*IPConn).SetWriteDeadline", Method, 0}, + {"(*IPConn).SyscallConn", Method, 9}, + {"(*IPConn).Write", Method, 0}, + {"(*IPConn).WriteMsgIP", Method, 1}, + {"(*IPConn).WriteTo", Method, 0}, + {"(*IPConn).WriteToIP", Method, 0}, + {"(*IPNet).Contains", Method, 0}, + {"(*IPNet).Network", Method, 0}, + {"(*IPNet).String", Method, 0}, + {"(*Interface).Addrs", Method, 0}, + {"(*Interface).MulticastAddrs", Method, 0}, + {"(*ListenConfig).Listen", Method, 11}, + {"(*ListenConfig).ListenPacket", Method, 11}, + {"(*ListenConfig).MultipathTCP", Method, 21}, + {"(*ListenConfig).SetMultipathTCP", Method, 21}, + {"(*OpError).Error", Method, 0}, + {"(*OpError).Temporary", Method, 0}, + {"(*OpError).Timeout", Method, 0}, + {"(*OpError).Unwrap", Method, 13}, + {"(*ParseError).Error", Method, 0}, + {"(*ParseError).Temporary", Method, 17}, + {"(*ParseError).Timeout", Method, 17}, + {"(*Resolver).LookupAddr", Method, 8}, + {"(*Resolver).LookupCNAME", Method, 8}, + {"(*Resolver).LookupHost", Method, 8}, + {"(*Resolver).LookupIP", Method, 15}, + {"(*Resolver).LookupIPAddr", Method, 8}, + {"(*Resolver).LookupMX", Method, 8}, + {"(*Resolver).LookupNS", Method, 8}, + {"(*Resolver).LookupNetIP", Method, 18}, + {"(*Resolver).LookupPort", Method, 8}, + {"(*Resolver).LookupSRV", Method, 8}, + {"(*Resolver).LookupTXT", Method, 8}, + {"(*TCPAddr).AddrPort", Method, 18}, + {"(*TCPAddr).Network", Method, 0}, + {"(*TCPAddr).String", Method, 0}, + {"(*TCPConn).Close", Method, 0}, + {"(*TCPConn).CloseRead", Method, 0}, + {"(*TCPConn).CloseWrite", Method, 0}, + {"(*TCPConn).File", Method, 0}, + {"(*TCPConn).LocalAddr", Method, 0}, + {"(*TCPConn).MultipathTCP", Method, 21}, + {"(*TCPConn).Read", Method, 0}, + {"(*TCPConn).ReadFrom", Method, 0}, + {"(*TCPConn).RemoteAddr", Method, 0}, + {"(*TCPConn).SetDeadline", Method, 0}, + {"(*TCPConn).SetKeepAlive", Method, 0}, + {"(*TCPConn).SetKeepAlivePeriod", Method, 2}, + {"(*TCPConn).SetLinger", Method, 0}, + {"(*TCPConn).SetNoDelay", Method, 0}, + {"(*TCPConn).SetReadBuffer", Method, 0}, + {"(*TCPConn).SetReadDeadline", Method, 0}, + {"(*TCPConn).SetWriteBuffer", Method, 0}, + {"(*TCPConn).SetWriteDeadline", Method, 0}, + {"(*TCPConn).SyscallConn", Method, 9}, + {"(*TCPConn).Write", Method, 0}, + {"(*TCPConn).WriteTo", Method, 22}, + {"(*TCPListener).Accept", Method, 0}, + {"(*TCPListener).AcceptTCP", Method, 0}, + {"(*TCPListener).Addr", Method, 0}, + {"(*TCPListener).Close", Method, 0}, + {"(*TCPListener).File", Method, 0}, + {"(*TCPListener).SetDeadline", Method, 0}, + {"(*TCPListener).SyscallConn", Method, 10}, + {"(*UDPAddr).AddrPort", Method, 18}, + {"(*UDPAddr).Network", Method, 0}, + {"(*UDPAddr).String", Method, 0}, + {"(*UDPConn).Close", Method, 0}, + {"(*UDPConn).File", Method, 0}, + {"(*UDPConn).LocalAddr", Method, 0}, + {"(*UDPConn).Read", Method, 0}, + {"(*UDPConn).ReadFrom", Method, 0}, + {"(*UDPConn).ReadFromUDP", Method, 0}, + {"(*UDPConn).ReadFromUDPAddrPort", Method, 18}, + {"(*UDPConn).ReadMsgUDP", Method, 1}, + {"(*UDPConn).ReadMsgUDPAddrPort", Method, 18}, + {"(*UDPConn).RemoteAddr", Method, 0}, + {"(*UDPConn).SetDeadline", Method, 0}, + {"(*UDPConn).SetReadBuffer", 
+ [vendored hunk: golang.org/x/tools internal/stdlib API manifest — machine-generated {symbol, kind, minimum Go minor version} entries for the standard-library packages net, net/http, net/http/cgi, net/http/cookiejar, net/http/fcgi, net/http/httptest, net/http/httptrace, net/http/httputil, net/http/pprof, net/mail, net/netip, net/rpc, net/rpc/jsonrpc, net/smtp, net/textproto, net/url, os, os/exec, os/signal, os/user, path, path/filepath, plugin, reflect, regexp, regexp/syntax, runtime, runtime/cgo, runtime/coverage, runtime/debug, runtime/metrics, runtime/pprof, runtime/trace, slices, sort, strconv, strings, sync, sync/atomic, and syscall]
Const, 0}, + {"ARPHRD_APPLETLK", Const, 0}, + {"ARPHRD_ARCNET", Const, 0}, + {"ARPHRD_ASH", Const, 0}, + {"ARPHRD_ATM", Const, 0}, + {"ARPHRD_AX25", Const, 0}, + {"ARPHRD_BIF", Const, 0}, + {"ARPHRD_CHAOS", Const, 0}, + {"ARPHRD_CISCO", Const, 0}, + {"ARPHRD_CSLIP", Const, 0}, + {"ARPHRD_CSLIP6", Const, 0}, + {"ARPHRD_DDCMP", Const, 0}, + {"ARPHRD_DLCI", Const, 0}, + {"ARPHRD_ECONET", Const, 0}, + {"ARPHRD_EETHER", Const, 0}, + {"ARPHRD_ETHER", Const, 0}, + {"ARPHRD_EUI64", Const, 0}, + {"ARPHRD_FCAL", Const, 0}, + {"ARPHRD_FCFABRIC", Const, 0}, + {"ARPHRD_FCPL", Const, 0}, + {"ARPHRD_FCPP", Const, 0}, + {"ARPHRD_FDDI", Const, 0}, + {"ARPHRD_FRAD", Const, 0}, + {"ARPHRD_FRELAY", Const, 1}, + {"ARPHRD_HDLC", Const, 0}, + {"ARPHRD_HIPPI", Const, 0}, + {"ARPHRD_HWX25", Const, 0}, + {"ARPHRD_IEEE1394", Const, 0}, + {"ARPHRD_IEEE802", Const, 0}, + {"ARPHRD_IEEE80211", Const, 0}, + {"ARPHRD_IEEE80211_PRISM", Const, 0}, + {"ARPHRD_IEEE80211_RADIOTAP", Const, 0}, + {"ARPHRD_IEEE802154", Const, 0}, + {"ARPHRD_IEEE802154_PHY", Const, 0}, + {"ARPHRD_IEEE802_TR", Const, 0}, + {"ARPHRD_INFINIBAND", Const, 0}, + {"ARPHRD_IPDDP", Const, 0}, + {"ARPHRD_IPGRE", Const, 0}, + {"ARPHRD_IRDA", Const, 0}, + {"ARPHRD_LAPB", Const, 0}, + {"ARPHRD_LOCALTLK", Const, 0}, + {"ARPHRD_LOOPBACK", Const, 0}, + {"ARPHRD_METRICOM", Const, 0}, + {"ARPHRD_NETROM", Const, 0}, + {"ARPHRD_NONE", Const, 0}, + {"ARPHRD_PIMREG", Const, 0}, + {"ARPHRD_PPP", Const, 0}, + {"ARPHRD_PRONET", Const, 0}, + {"ARPHRD_RAWHDLC", Const, 0}, + {"ARPHRD_ROSE", Const, 0}, + {"ARPHRD_RSRVD", Const, 0}, + {"ARPHRD_SIT", Const, 0}, + {"ARPHRD_SKIP", Const, 0}, + {"ARPHRD_SLIP", Const, 0}, + {"ARPHRD_SLIP6", Const, 0}, + {"ARPHRD_STRIP", Const, 1}, + {"ARPHRD_TUNNEL", Const, 0}, + {"ARPHRD_TUNNEL6", Const, 0}, + {"ARPHRD_VOID", Const, 0}, + {"ARPHRD_X25", Const, 0}, + {"AUTHTYPE_CLIENT", Const, 0}, + {"AUTHTYPE_SERVER", Const, 0}, + {"Accept", Func, 0}, + {"Accept4", Func, 1}, + {"AcceptEx", Func, 0}, + {"Access", Func, 0}, + {"Acct", Func, 0}, + {"AddrinfoW", Type, 1}, + {"AddrinfoW.Addr", Field, 1}, + {"AddrinfoW.Addrlen", Field, 1}, + {"AddrinfoW.Canonname", Field, 1}, + {"AddrinfoW.Family", Field, 1}, + {"AddrinfoW.Flags", Field, 1}, + {"AddrinfoW.Next", Field, 1}, + {"AddrinfoW.Protocol", Field, 1}, + {"AddrinfoW.Socktype", Field, 1}, + {"Adjtime", Func, 0}, + {"Adjtimex", Func, 0}, + {"AllThreadsSyscall", Func, 16}, + {"AllThreadsSyscall6", Func, 16}, + {"AttachLsf", Func, 0}, + {"B0", Const, 0}, + {"B1000000", Const, 0}, + {"B110", Const, 0}, + {"B115200", Const, 0}, + {"B1152000", Const, 0}, + {"B1200", Const, 0}, + {"B134", Const, 0}, + {"B14400", Const, 1}, + {"B150", Const, 0}, + {"B1500000", Const, 0}, + {"B1800", Const, 0}, + {"B19200", Const, 0}, + {"B200", Const, 0}, + {"B2000000", Const, 0}, + {"B230400", Const, 0}, + {"B2400", Const, 0}, + {"B2500000", Const, 0}, + {"B28800", Const, 1}, + {"B300", Const, 0}, + {"B3000000", Const, 0}, + {"B3500000", Const, 0}, + {"B38400", Const, 0}, + {"B4000000", Const, 0}, + {"B460800", Const, 0}, + {"B4800", Const, 0}, + {"B50", Const, 0}, + {"B500000", Const, 0}, + {"B57600", Const, 0}, + {"B576000", Const, 0}, + {"B600", Const, 0}, + {"B7200", Const, 1}, + {"B75", Const, 0}, + {"B76800", Const, 1}, + {"B921600", Const, 0}, + {"B9600", Const, 0}, + {"BASE_PROTOCOL", Const, 2}, + {"BIOCFEEDBACK", Const, 0}, + {"BIOCFLUSH", Const, 0}, + {"BIOCGBLEN", Const, 0}, + {"BIOCGDIRECTION", Const, 0}, + {"BIOCGDIRFILT", Const, 1}, + {"BIOCGDLT", Const, 0}, + {"BIOCGDLTLIST", Const, 0}, + {"BIOCGETBUFMODE", 
Const, 0}, + {"BIOCGETIF", Const, 0}, + {"BIOCGETZMAX", Const, 0}, + {"BIOCGFEEDBACK", Const, 1}, + {"BIOCGFILDROP", Const, 1}, + {"BIOCGHDRCMPLT", Const, 0}, + {"BIOCGRSIG", Const, 0}, + {"BIOCGRTIMEOUT", Const, 0}, + {"BIOCGSEESENT", Const, 0}, + {"BIOCGSTATS", Const, 0}, + {"BIOCGSTATSOLD", Const, 1}, + {"BIOCGTSTAMP", Const, 1}, + {"BIOCIMMEDIATE", Const, 0}, + {"BIOCLOCK", Const, 0}, + {"BIOCPROMISC", Const, 0}, + {"BIOCROTZBUF", Const, 0}, + {"BIOCSBLEN", Const, 0}, + {"BIOCSDIRECTION", Const, 0}, + {"BIOCSDIRFILT", Const, 1}, + {"BIOCSDLT", Const, 0}, + {"BIOCSETBUFMODE", Const, 0}, + {"BIOCSETF", Const, 0}, + {"BIOCSETFNR", Const, 0}, + {"BIOCSETIF", Const, 0}, + {"BIOCSETWF", Const, 0}, + {"BIOCSETZBUF", Const, 0}, + {"BIOCSFEEDBACK", Const, 1}, + {"BIOCSFILDROP", Const, 1}, + {"BIOCSHDRCMPLT", Const, 0}, + {"BIOCSRSIG", Const, 0}, + {"BIOCSRTIMEOUT", Const, 0}, + {"BIOCSSEESENT", Const, 0}, + {"BIOCSTCPF", Const, 1}, + {"BIOCSTSTAMP", Const, 1}, + {"BIOCSUDPF", Const, 1}, + {"BIOCVERSION", Const, 0}, + {"BPF_A", Const, 0}, + {"BPF_ABS", Const, 0}, + {"BPF_ADD", Const, 0}, + {"BPF_ALIGNMENT", Const, 0}, + {"BPF_ALIGNMENT32", Const, 1}, + {"BPF_ALU", Const, 0}, + {"BPF_AND", Const, 0}, + {"BPF_B", Const, 0}, + {"BPF_BUFMODE_BUFFER", Const, 0}, + {"BPF_BUFMODE_ZBUF", Const, 0}, + {"BPF_DFLTBUFSIZE", Const, 1}, + {"BPF_DIRECTION_IN", Const, 1}, + {"BPF_DIRECTION_OUT", Const, 1}, + {"BPF_DIV", Const, 0}, + {"BPF_H", Const, 0}, + {"BPF_IMM", Const, 0}, + {"BPF_IND", Const, 0}, + {"BPF_JA", Const, 0}, + {"BPF_JEQ", Const, 0}, + {"BPF_JGE", Const, 0}, + {"BPF_JGT", Const, 0}, + {"BPF_JMP", Const, 0}, + {"BPF_JSET", Const, 0}, + {"BPF_K", Const, 0}, + {"BPF_LD", Const, 0}, + {"BPF_LDX", Const, 0}, + {"BPF_LEN", Const, 0}, + {"BPF_LSH", Const, 0}, + {"BPF_MAJOR_VERSION", Const, 0}, + {"BPF_MAXBUFSIZE", Const, 0}, + {"BPF_MAXINSNS", Const, 0}, + {"BPF_MEM", Const, 0}, + {"BPF_MEMWORDS", Const, 0}, + {"BPF_MINBUFSIZE", Const, 0}, + {"BPF_MINOR_VERSION", Const, 0}, + {"BPF_MISC", Const, 0}, + {"BPF_MSH", Const, 0}, + {"BPF_MUL", Const, 0}, + {"BPF_NEG", Const, 0}, + {"BPF_OR", Const, 0}, + {"BPF_RELEASE", Const, 0}, + {"BPF_RET", Const, 0}, + {"BPF_RSH", Const, 0}, + {"BPF_ST", Const, 0}, + {"BPF_STX", Const, 0}, + {"BPF_SUB", Const, 0}, + {"BPF_TAX", Const, 0}, + {"BPF_TXA", Const, 0}, + {"BPF_T_BINTIME", Const, 1}, + {"BPF_T_BINTIME_FAST", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC", Const, 1}, + {"BPF_T_BINTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_FAST", Const, 1}, + {"BPF_T_FLAG_MASK", Const, 1}, + {"BPF_T_FORMAT_MASK", Const, 1}, + {"BPF_T_MICROTIME", Const, 1}, + {"BPF_T_MICROTIME_FAST", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC", Const, 1}, + {"BPF_T_MICROTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_MONOTONIC", Const, 1}, + {"BPF_T_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NANOTIME", Const, 1}, + {"BPF_T_NANOTIME_FAST", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC", Const, 1}, + {"BPF_T_NANOTIME_MONOTONIC_FAST", Const, 1}, + {"BPF_T_NONE", Const, 1}, + {"BPF_T_NORMAL", Const, 1}, + {"BPF_W", Const, 0}, + {"BPF_X", Const, 0}, + {"BRKINT", Const, 0}, + {"Bind", Func, 0}, + {"BindToDevice", Func, 0}, + {"BpfBuflen", Func, 0}, + {"BpfDatalink", Func, 0}, + {"BpfHdr", Type, 0}, + {"BpfHdr.Caplen", Field, 0}, + {"BpfHdr.Datalen", Field, 0}, + {"BpfHdr.Hdrlen", Field, 0}, + {"BpfHdr.Pad_cgo_0", Field, 0}, + {"BpfHdr.Tstamp", Field, 0}, + {"BpfHeadercmpl", Func, 0}, + {"BpfInsn", Type, 0}, + {"BpfInsn.Code", Field, 0}, + {"BpfInsn.Jf", Field, 0}, + {"BpfInsn.Jt", Field, 0}, + {"BpfInsn.K", Field, 0}, + 
{"BpfInterface", Func, 0}, + {"BpfJump", Func, 0}, + {"BpfProgram", Type, 0}, + {"BpfProgram.Insns", Field, 0}, + {"BpfProgram.Len", Field, 0}, + {"BpfProgram.Pad_cgo_0", Field, 0}, + {"BpfStat", Type, 0}, + {"BpfStat.Capt", Field, 2}, + {"BpfStat.Drop", Field, 0}, + {"BpfStat.Padding", Field, 2}, + {"BpfStat.Recv", Field, 0}, + {"BpfStats", Func, 0}, + {"BpfStmt", Func, 0}, + {"BpfTimeout", Func, 0}, + {"BpfTimeval", Type, 2}, + {"BpfTimeval.Sec", Field, 2}, + {"BpfTimeval.Usec", Field, 2}, + {"BpfVersion", Type, 0}, + {"BpfVersion.Major", Field, 0}, + {"BpfVersion.Minor", Field, 0}, + {"BpfZbuf", Type, 0}, + {"BpfZbuf.Bufa", Field, 0}, + {"BpfZbuf.Bufb", Field, 0}, + {"BpfZbuf.Buflen", Field, 0}, + {"BpfZbufHeader", Type, 0}, + {"BpfZbufHeader.Kernel_gen", Field, 0}, + {"BpfZbufHeader.Kernel_len", Field, 0}, + {"BpfZbufHeader.User_gen", Field, 0}, + {"BpfZbufHeader.X_bzh_pad", Field, 0}, + {"ByHandleFileInformation", Type, 0}, + {"ByHandleFileInformation.CreationTime", Field, 0}, + {"ByHandleFileInformation.FileAttributes", Field, 0}, + {"ByHandleFileInformation.FileIndexHigh", Field, 0}, + {"ByHandleFileInformation.FileIndexLow", Field, 0}, + {"ByHandleFileInformation.FileSizeHigh", Field, 0}, + {"ByHandleFileInformation.FileSizeLow", Field, 0}, + {"ByHandleFileInformation.LastAccessTime", Field, 0}, + {"ByHandleFileInformation.LastWriteTime", Field, 0}, + {"ByHandleFileInformation.NumberOfLinks", Field, 0}, + {"ByHandleFileInformation.VolumeSerialNumber", Field, 0}, + {"BytePtrFromString", Func, 1}, + {"ByteSliceFromString", Func, 1}, + {"CCR0_FLUSH", Const, 1}, + {"CERT_CHAIN_POLICY_AUTHENTICODE", Const, 0}, + {"CERT_CHAIN_POLICY_AUTHENTICODE_TS", Const, 0}, + {"CERT_CHAIN_POLICY_BASE", Const, 0}, + {"CERT_CHAIN_POLICY_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_CHAIN_POLICY_EV", Const, 0}, + {"CERT_CHAIN_POLICY_MICROSOFT_ROOT", Const, 0}, + {"CERT_CHAIN_POLICY_NT_AUTH", Const, 0}, + {"CERT_CHAIN_POLICY_SSL", Const, 0}, + {"CERT_E_CN_NO_MATCH", Const, 0}, + {"CERT_E_EXPIRED", Const, 0}, + {"CERT_E_PURPOSE", Const, 0}, + {"CERT_E_ROLE", Const, 0}, + {"CERT_E_UNTRUSTEDROOT", Const, 0}, + {"CERT_STORE_ADD_ALWAYS", Const, 0}, + {"CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG", Const, 0}, + {"CERT_STORE_PROV_MEMORY", Const, 0}, + {"CERT_TRUST_HAS_EXCLUDED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_DEFINED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_PERMITTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_CRITICAL_EXT", Const, 0}, + {"CERT_TRUST_HAS_NOT_SUPPORTED_NAME_CONSTRAINT", Const, 0}, + {"CERT_TRUST_INVALID_BASIC_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_EXTENSION", Const, 0}, + {"CERT_TRUST_INVALID_NAME_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_INVALID_POLICY_CONSTRAINTS", Const, 0}, + {"CERT_TRUST_IS_CYCLIC", Const, 0}, + {"CERT_TRUST_IS_EXPLICIT_DISTRUST", Const, 0}, + {"CERT_TRUST_IS_NOT_SIGNATURE_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_TIME_VALID", Const, 0}, + {"CERT_TRUST_IS_NOT_VALID_FOR_USAGE", Const, 0}, + {"CERT_TRUST_IS_OFFLINE_REVOCATION", Const, 0}, + {"CERT_TRUST_IS_REVOKED", Const, 0}, + {"CERT_TRUST_IS_UNTRUSTED_ROOT", Const, 0}, + {"CERT_TRUST_NO_ERROR", Const, 0}, + {"CERT_TRUST_NO_ISSUANCE_CHAIN_POLICY", Const, 0}, + {"CERT_TRUST_REVOCATION_STATUS_UNKNOWN", Const, 0}, + {"CFLUSH", Const, 1}, + {"CLOCAL", Const, 0}, + {"CLONE_CHILD_CLEARTID", Const, 2}, + {"CLONE_CHILD_SETTID", Const, 2}, + {"CLONE_CLEAR_SIGHAND", Const, 20}, + {"CLONE_CSIGNAL", Const, 3}, + {"CLONE_DETACHED", Const, 2}, + {"CLONE_FILES", Const, 2}, + {"CLONE_FS", 
Const, 2}, + {"CLONE_INTO_CGROUP", Const, 20}, + {"CLONE_IO", Const, 2}, + {"CLONE_NEWCGROUP", Const, 20}, + {"CLONE_NEWIPC", Const, 2}, + {"CLONE_NEWNET", Const, 2}, + {"CLONE_NEWNS", Const, 2}, + {"CLONE_NEWPID", Const, 2}, + {"CLONE_NEWTIME", Const, 20}, + {"CLONE_NEWUSER", Const, 2}, + {"CLONE_NEWUTS", Const, 2}, + {"CLONE_PARENT", Const, 2}, + {"CLONE_PARENT_SETTID", Const, 2}, + {"CLONE_PID", Const, 3}, + {"CLONE_PIDFD", Const, 20}, + {"CLONE_PTRACE", Const, 2}, + {"CLONE_SETTLS", Const, 2}, + {"CLONE_SIGHAND", Const, 2}, + {"CLONE_SYSVSEM", Const, 2}, + {"CLONE_THREAD", Const, 2}, + {"CLONE_UNTRACED", Const, 2}, + {"CLONE_VFORK", Const, 2}, + {"CLONE_VM", Const, 2}, + {"CPUID_CFLUSH", Const, 1}, + {"CREAD", Const, 0}, + {"CREATE_ALWAYS", Const, 0}, + {"CREATE_NEW", Const, 0}, + {"CREATE_NEW_PROCESS_GROUP", Const, 1}, + {"CREATE_UNICODE_ENVIRONMENT", Const, 0}, + {"CRYPT_DEFAULT_CONTAINER_OPTIONAL", Const, 0}, + {"CRYPT_DELETEKEYSET", Const, 0}, + {"CRYPT_MACHINE_KEYSET", Const, 0}, + {"CRYPT_NEWKEYSET", Const, 0}, + {"CRYPT_SILENT", Const, 0}, + {"CRYPT_VERIFYCONTEXT", Const, 0}, + {"CS5", Const, 0}, + {"CS6", Const, 0}, + {"CS7", Const, 0}, + {"CS8", Const, 0}, + {"CSIZE", Const, 0}, + {"CSTART", Const, 1}, + {"CSTATUS", Const, 1}, + {"CSTOP", Const, 1}, + {"CSTOPB", Const, 0}, + {"CSUSP", Const, 1}, + {"CTL_MAXNAME", Const, 0}, + {"CTL_NET", Const, 0}, + {"CTL_QUERY", Const, 1}, + {"CTRL_BREAK_EVENT", Const, 1}, + {"CTRL_CLOSE_EVENT", Const, 14}, + {"CTRL_C_EVENT", Const, 1}, + {"CTRL_LOGOFF_EVENT", Const, 14}, + {"CTRL_SHUTDOWN_EVENT", Const, 14}, + {"CancelIo", Func, 0}, + {"CancelIoEx", Func, 1}, + {"CertAddCertificateContextToStore", Func, 0}, + {"CertChainContext", Type, 0}, + {"CertChainContext.ChainCount", Field, 0}, + {"CertChainContext.Chains", Field, 0}, + {"CertChainContext.HasRevocationFreshnessTime", Field, 0}, + {"CertChainContext.LowerQualityChainCount", Field, 0}, + {"CertChainContext.LowerQualityChains", Field, 0}, + {"CertChainContext.RevocationFreshnessTime", Field, 0}, + {"CertChainContext.Size", Field, 0}, + {"CertChainContext.TrustStatus", Field, 0}, + {"CertChainElement", Type, 0}, + {"CertChainElement.ApplicationUsage", Field, 0}, + {"CertChainElement.CertContext", Field, 0}, + {"CertChainElement.ExtendedErrorInfo", Field, 0}, + {"CertChainElement.IssuanceUsage", Field, 0}, + {"CertChainElement.RevocationInfo", Field, 0}, + {"CertChainElement.Size", Field, 0}, + {"CertChainElement.TrustStatus", Field, 0}, + {"CertChainPara", Type, 0}, + {"CertChainPara.CacheResync", Field, 0}, + {"CertChainPara.CheckRevocationFreshnessTime", Field, 0}, + {"CertChainPara.RequestedUsage", Field, 0}, + {"CertChainPara.RequstedIssuancePolicy", Field, 0}, + {"CertChainPara.RevocationFreshnessTime", Field, 0}, + {"CertChainPara.Size", Field, 0}, + {"CertChainPara.URLRetrievalTimeout", Field, 0}, + {"CertChainPolicyPara", Type, 0}, + {"CertChainPolicyPara.ExtraPolicyPara", Field, 0}, + {"CertChainPolicyPara.Flags", Field, 0}, + {"CertChainPolicyPara.Size", Field, 0}, + {"CertChainPolicyStatus", Type, 0}, + {"CertChainPolicyStatus.ChainIndex", Field, 0}, + {"CertChainPolicyStatus.ElementIndex", Field, 0}, + {"CertChainPolicyStatus.Error", Field, 0}, + {"CertChainPolicyStatus.ExtraPolicyStatus", Field, 0}, + {"CertChainPolicyStatus.Size", Field, 0}, + {"CertCloseStore", Func, 0}, + {"CertContext", Type, 0}, + {"CertContext.CertInfo", Field, 0}, + {"CertContext.EncodedCert", Field, 0}, + {"CertContext.EncodingType", Field, 0}, + {"CertContext.Length", Field, 0}, + 
{"CertContext.Store", Field, 0}, + {"CertCreateCertificateContext", Func, 0}, + {"CertEnhKeyUsage", Type, 0}, + {"CertEnhKeyUsage.Length", Field, 0}, + {"CertEnhKeyUsage.UsageIdentifiers", Field, 0}, + {"CertEnumCertificatesInStore", Func, 0}, + {"CertFreeCertificateChain", Func, 0}, + {"CertFreeCertificateContext", Func, 0}, + {"CertGetCertificateChain", Func, 0}, + {"CertInfo", Type, 11}, + {"CertOpenStore", Func, 0}, + {"CertOpenSystemStore", Func, 0}, + {"CertRevocationCrlInfo", Type, 11}, + {"CertRevocationInfo", Type, 0}, + {"CertRevocationInfo.CrlInfo", Field, 0}, + {"CertRevocationInfo.FreshnessTime", Field, 0}, + {"CertRevocationInfo.HasFreshnessTime", Field, 0}, + {"CertRevocationInfo.OidSpecificInfo", Field, 0}, + {"CertRevocationInfo.RevocationOid", Field, 0}, + {"CertRevocationInfo.RevocationResult", Field, 0}, + {"CertRevocationInfo.Size", Field, 0}, + {"CertSimpleChain", Type, 0}, + {"CertSimpleChain.Elements", Field, 0}, + {"CertSimpleChain.HasRevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.NumElements", Field, 0}, + {"CertSimpleChain.RevocationFreshnessTime", Field, 0}, + {"CertSimpleChain.Size", Field, 0}, + {"CertSimpleChain.TrustListInfo", Field, 0}, + {"CertSimpleChain.TrustStatus", Field, 0}, + {"CertTrustListInfo", Type, 11}, + {"CertTrustStatus", Type, 0}, + {"CertTrustStatus.ErrorStatus", Field, 0}, + {"CertTrustStatus.InfoStatus", Field, 0}, + {"CertUsageMatch", Type, 0}, + {"CertUsageMatch.Type", Field, 0}, + {"CertUsageMatch.Usage", Field, 0}, + {"CertVerifyCertificateChainPolicy", Func, 0}, + {"Chdir", Func, 0}, + {"CheckBpfVersion", Func, 0}, + {"Chflags", Func, 0}, + {"Chmod", Func, 0}, + {"Chown", Func, 0}, + {"Chroot", Func, 0}, + {"Clearenv", Func, 0}, + {"Close", Func, 0}, + {"CloseHandle", Func, 0}, + {"CloseOnExec", Func, 0}, + {"Closesocket", Func, 0}, + {"CmsgLen", Func, 0}, + {"CmsgSpace", Func, 0}, + {"Cmsghdr", Type, 0}, + {"Cmsghdr.Len", Field, 0}, + {"Cmsghdr.Level", Field, 0}, + {"Cmsghdr.Type", Field, 0}, + {"Cmsghdr.X__cmsg_data", Field, 0}, + {"CommandLineToArgv", Func, 0}, + {"ComputerName", Func, 0}, + {"Conn", Type, 9}, + {"Connect", Func, 0}, + {"ConnectEx", Func, 1}, + {"ConvertSidToStringSid", Func, 0}, + {"ConvertStringSidToSid", Func, 0}, + {"CopySid", Func, 0}, + {"Creat", Func, 0}, + {"CreateDirectory", Func, 0}, + {"CreateFile", Func, 0}, + {"CreateFileMapping", Func, 0}, + {"CreateHardLink", Func, 4}, + {"CreateIoCompletionPort", Func, 0}, + {"CreatePipe", Func, 0}, + {"CreateProcess", Func, 0}, + {"CreateProcessAsUser", Func, 10}, + {"CreateSymbolicLink", Func, 4}, + {"CreateToolhelp32Snapshot", Func, 4}, + {"Credential", Type, 0}, + {"Credential.Gid", Field, 0}, + {"Credential.Groups", Field, 0}, + {"Credential.NoSetGroups", Field, 9}, + {"Credential.Uid", Field, 0}, + {"CryptAcquireContext", Func, 0}, + {"CryptGenRandom", Func, 0}, + {"CryptReleaseContext", Func, 0}, + {"DIOCBSFLUSH", Const, 1}, + {"DIOCOSFPFLUSH", Const, 1}, + {"DLL", Type, 0}, + {"DLL.Handle", Field, 0}, + {"DLL.Name", Field, 0}, + {"DLLError", Type, 0}, + {"DLLError.Err", Field, 0}, + {"DLLError.Msg", Field, 0}, + {"DLLError.ObjName", Field, 0}, + {"DLT_A429", Const, 0}, + {"DLT_A653_ICM", Const, 0}, + {"DLT_AIRONET_HEADER", Const, 0}, + {"DLT_AOS", Const, 1}, + {"DLT_APPLE_IP_OVER_IEEE1394", Const, 0}, + {"DLT_ARCNET", Const, 0}, + {"DLT_ARCNET_LINUX", Const, 0}, + {"DLT_ATM_CLIP", Const, 0}, + {"DLT_ATM_RFC1483", Const, 0}, + {"DLT_AURORA", Const, 0}, + {"DLT_AX25", Const, 0}, + {"DLT_AX25_KISS", Const, 0}, + {"DLT_BACNET_MS_TP", Const, 0}, + 
{"DLT_BLUETOOTH_HCI_H4", Const, 0}, + {"DLT_BLUETOOTH_HCI_H4_WITH_PHDR", Const, 0}, + {"DLT_CAN20B", Const, 0}, + {"DLT_CAN_SOCKETCAN", Const, 1}, + {"DLT_CHAOS", Const, 0}, + {"DLT_CHDLC", Const, 0}, + {"DLT_CISCO_IOS", Const, 0}, + {"DLT_C_HDLC", Const, 0}, + {"DLT_C_HDLC_WITH_DIR", Const, 0}, + {"DLT_DBUS", Const, 1}, + {"DLT_DECT", Const, 1}, + {"DLT_DOCSIS", Const, 0}, + {"DLT_DVB_CI", Const, 1}, + {"DLT_ECONET", Const, 0}, + {"DLT_EN10MB", Const, 0}, + {"DLT_EN3MB", Const, 0}, + {"DLT_ENC", Const, 0}, + {"DLT_ERF", Const, 0}, + {"DLT_ERF_ETH", Const, 0}, + {"DLT_ERF_POS", Const, 0}, + {"DLT_FC_2", Const, 1}, + {"DLT_FC_2_WITH_FRAME_DELIMS", Const, 1}, + {"DLT_FDDI", Const, 0}, + {"DLT_FLEXRAY", Const, 0}, + {"DLT_FRELAY", Const, 0}, + {"DLT_FRELAY_WITH_DIR", Const, 0}, + {"DLT_GCOM_SERIAL", Const, 0}, + {"DLT_GCOM_T1E1", Const, 0}, + {"DLT_GPF_F", Const, 0}, + {"DLT_GPF_T", Const, 0}, + {"DLT_GPRS_LLC", Const, 0}, + {"DLT_GSMTAP_ABIS", Const, 1}, + {"DLT_GSMTAP_UM", Const, 1}, + {"DLT_HDLC", Const, 1}, + {"DLT_HHDLC", Const, 0}, + {"DLT_HIPPI", Const, 1}, + {"DLT_IBM_SN", Const, 0}, + {"DLT_IBM_SP", Const, 0}, + {"DLT_IEEE802", Const, 0}, + {"DLT_IEEE802_11", Const, 0}, + {"DLT_IEEE802_11_RADIO", Const, 0}, + {"DLT_IEEE802_11_RADIO_AVS", Const, 0}, + {"DLT_IEEE802_15_4", Const, 0}, + {"DLT_IEEE802_15_4_LINUX", Const, 0}, + {"DLT_IEEE802_15_4_NOFCS", Const, 1}, + {"DLT_IEEE802_15_4_NONASK_PHY", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS", Const, 0}, + {"DLT_IEEE802_16_MAC_CPS_RADIO", Const, 0}, + {"DLT_IPFILTER", Const, 0}, + {"DLT_IPMB", Const, 0}, + {"DLT_IPMB_LINUX", Const, 0}, + {"DLT_IPNET", Const, 1}, + {"DLT_IPOIB", Const, 1}, + {"DLT_IPV4", Const, 1}, + {"DLT_IPV6", Const, 1}, + {"DLT_IP_OVER_FC", Const, 0}, + {"DLT_JUNIPER_ATM1", Const, 0}, + {"DLT_JUNIPER_ATM2", Const, 0}, + {"DLT_JUNIPER_ATM_CEMIC", Const, 1}, + {"DLT_JUNIPER_CHDLC", Const, 0}, + {"DLT_JUNIPER_ES", Const, 0}, + {"DLT_JUNIPER_ETHER", Const, 0}, + {"DLT_JUNIPER_FIBRECHANNEL", Const, 1}, + {"DLT_JUNIPER_FRELAY", Const, 0}, + {"DLT_JUNIPER_GGSN", Const, 0}, + {"DLT_JUNIPER_ISM", Const, 0}, + {"DLT_JUNIPER_MFR", Const, 0}, + {"DLT_JUNIPER_MLFR", Const, 0}, + {"DLT_JUNIPER_MLPPP", Const, 0}, + {"DLT_JUNIPER_MONITOR", Const, 0}, + {"DLT_JUNIPER_PIC_PEER", Const, 0}, + {"DLT_JUNIPER_PPP", Const, 0}, + {"DLT_JUNIPER_PPPOE", Const, 0}, + {"DLT_JUNIPER_PPPOE_ATM", Const, 0}, + {"DLT_JUNIPER_SERVICES", Const, 0}, + {"DLT_JUNIPER_SRX_E2E", Const, 1}, + {"DLT_JUNIPER_ST", Const, 0}, + {"DLT_JUNIPER_VP", Const, 0}, + {"DLT_JUNIPER_VS", Const, 1}, + {"DLT_LAPB_WITH_DIR", Const, 0}, + {"DLT_LAPD", Const, 0}, + {"DLT_LIN", Const, 0}, + {"DLT_LINUX_EVDEV", Const, 1}, + {"DLT_LINUX_IRDA", Const, 0}, + {"DLT_LINUX_LAPD", Const, 0}, + {"DLT_LINUX_PPP_WITHDIRECTION", Const, 0}, + {"DLT_LINUX_SLL", Const, 0}, + {"DLT_LOOP", Const, 0}, + {"DLT_LTALK", Const, 0}, + {"DLT_MATCHING_MAX", Const, 1}, + {"DLT_MATCHING_MIN", Const, 1}, + {"DLT_MFR", Const, 0}, + {"DLT_MOST", Const, 0}, + {"DLT_MPEG_2_TS", Const, 1}, + {"DLT_MPLS", Const, 1}, + {"DLT_MTP2", Const, 0}, + {"DLT_MTP2_WITH_PHDR", Const, 0}, + {"DLT_MTP3", Const, 0}, + {"DLT_MUX27010", Const, 1}, + {"DLT_NETANALYZER", Const, 1}, + {"DLT_NETANALYZER_TRANSPARENT", Const, 1}, + {"DLT_NFC_LLCP", Const, 1}, + {"DLT_NFLOG", Const, 1}, + {"DLT_NG40", Const, 1}, + {"DLT_NULL", Const, 0}, + {"DLT_PCI_EXP", Const, 0}, + {"DLT_PFLOG", Const, 0}, + {"DLT_PFSYNC", Const, 0}, + {"DLT_PPI", Const, 0}, + {"DLT_PPP", Const, 0}, + {"DLT_PPP_BSDOS", Const, 0}, + {"DLT_PPP_ETHER", Const, 0}, + 
{"DLT_PPP_PPPD", Const, 0}, + {"DLT_PPP_SERIAL", Const, 0}, + {"DLT_PPP_WITH_DIR", Const, 0}, + {"DLT_PPP_WITH_DIRECTION", Const, 0}, + {"DLT_PRISM_HEADER", Const, 0}, + {"DLT_PRONET", Const, 0}, + {"DLT_RAIF1", Const, 0}, + {"DLT_RAW", Const, 0}, + {"DLT_RAWAF_MASK", Const, 1}, + {"DLT_RIO", Const, 0}, + {"DLT_SCCP", Const, 0}, + {"DLT_SITA", Const, 0}, + {"DLT_SLIP", Const, 0}, + {"DLT_SLIP_BSDOS", Const, 0}, + {"DLT_STANAG_5066_D_PDU", Const, 1}, + {"DLT_SUNATM", Const, 0}, + {"DLT_SYMANTEC_FIREWALL", Const, 0}, + {"DLT_TZSP", Const, 0}, + {"DLT_USB", Const, 0}, + {"DLT_USB_LINUX", Const, 0}, + {"DLT_USB_LINUX_MMAPPED", Const, 1}, + {"DLT_USER0", Const, 0}, + {"DLT_USER1", Const, 0}, + {"DLT_USER10", Const, 0}, + {"DLT_USER11", Const, 0}, + {"DLT_USER12", Const, 0}, + {"DLT_USER13", Const, 0}, + {"DLT_USER14", Const, 0}, + {"DLT_USER15", Const, 0}, + {"DLT_USER2", Const, 0}, + {"DLT_USER3", Const, 0}, + {"DLT_USER4", Const, 0}, + {"DLT_USER5", Const, 0}, + {"DLT_USER6", Const, 0}, + {"DLT_USER7", Const, 0}, + {"DLT_USER8", Const, 0}, + {"DLT_USER9", Const, 0}, + {"DLT_WIHART", Const, 1}, + {"DLT_X2E_SERIAL", Const, 0}, + {"DLT_X2E_XORAYA", Const, 0}, + {"DNSMXData", Type, 0}, + {"DNSMXData.NameExchange", Field, 0}, + {"DNSMXData.Pad", Field, 0}, + {"DNSMXData.Preference", Field, 0}, + {"DNSPTRData", Type, 0}, + {"DNSPTRData.Host", Field, 0}, + {"DNSRecord", Type, 0}, + {"DNSRecord.Data", Field, 0}, + {"DNSRecord.Dw", Field, 0}, + {"DNSRecord.Length", Field, 0}, + {"DNSRecord.Name", Field, 0}, + {"DNSRecord.Next", Field, 0}, + {"DNSRecord.Reserved", Field, 0}, + {"DNSRecord.Ttl", Field, 0}, + {"DNSRecord.Type", Field, 0}, + {"DNSSRVData", Type, 0}, + {"DNSSRVData.Pad", Field, 0}, + {"DNSSRVData.Port", Field, 0}, + {"DNSSRVData.Priority", Field, 0}, + {"DNSSRVData.Target", Field, 0}, + {"DNSSRVData.Weight", Field, 0}, + {"DNSTXTData", Type, 0}, + {"DNSTXTData.StringArray", Field, 0}, + {"DNSTXTData.StringCount", Field, 0}, + {"DNS_INFO_NO_RECORDS", Const, 4}, + {"DNS_TYPE_A", Const, 0}, + {"DNS_TYPE_A6", Const, 0}, + {"DNS_TYPE_AAAA", Const, 0}, + {"DNS_TYPE_ADDRS", Const, 0}, + {"DNS_TYPE_AFSDB", Const, 0}, + {"DNS_TYPE_ALL", Const, 0}, + {"DNS_TYPE_ANY", Const, 0}, + {"DNS_TYPE_ATMA", Const, 0}, + {"DNS_TYPE_AXFR", Const, 0}, + {"DNS_TYPE_CERT", Const, 0}, + {"DNS_TYPE_CNAME", Const, 0}, + {"DNS_TYPE_DHCID", Const, 0}, + {"DNS_TYPE_DNAME", Const, 0}, + {"DNS_TYPE_DNSKEY", Const, 0}, + {"DNS_TYPE_DS", Const, 0}, + {"DNS_TYPE_EID", Const, 0}, + {"DNS_TYPE_GID", Const, 0}, + {"DNS_TYPE_GPOS", Const, 0}, + {"DNS_TYPE_HINFO", Const, 0}, + {"DNS_TYPE_ISDN", Const, 0}, + {"DNS_TYPE_IXFR", Const, 0}, + {"DNS_TYPE_KEY", Const, 0}, + {"DNS_TYPE_KX", Const, 0}, + {"DNS_TYPE_LOC", Const, 0}, + {"DNS_TYPE_MAILA", Const, 0}, + {"DNS_TYPE_MAILB", Const, 0}, + {"DNS_TYPE_MB", Const, 0}, + {"DNS_TYPE_MD", Const, 0}, + {"DNS_TYPE_MF", Const, 0}, + {"DNS_TYPE_MG", Const, 0}, + {"DNS_TYPE_MINFO", Const, 0}, + {"DNS_TYPE_MR", Const, 0}, + {"DNS_TYPE_MX", Const, 0}, + {"DNS_TYPE_NAPTR", Const, 0}, + {"DNS_TYPE_NBSTAT", Const, 0}, + {"DNS_TYPE_NIMLOC", Const, 0}, + {"DNS_TYPE_NS", Const, 0}, + {"DNS_TYPE_NSAP", Const, 0}, + {"DNS_TYPE_NSAPPTR", Const, 0}, + {"DNS_TYPE_NSEC", Const, 0}, + {"DNS_TYPE_NULL", Const, 0}, + {"DNS_TYPE_NXT", Const, 0}, + {"DNS_TYPE_OPT", Const, 0}, + {"DNS_TYPE_PTR", Const, 0}, + {"DNS_TYPE_PX", Const, 0}, + {"DNS_TYPE_RP", Const, 0}, + {"DNS_TYPE_RRSIG", Const, 0}, + {"DNS_TYPE_RT", Const, 0}, + {"DNS_TYPE_SIG", Const, 0}, + {"DNS_TYPE_SINK", Const, 0}, + {"DNS_TYPE_SOA", Const, 
0}, + {"DNS_TYPE_SRV", Const, 0}, + {"DNS_TYPE_TEXT", Const, 0}, + {"DNS_TYPE_TKEY", Const, 0}, + {"DNS_TYPE_TSIG", Const, 0}, + {"DNS_TYPE_UID", Const, 0}, + {"DNS_TYPE_UINFO", Const, 0}, + {"DNS_TYPE_UNSPEC", Const, 0}, + {"DNS_TYPE_WINS", Const, 0}, + {"DNS_TYPE_WINSR", Const, 0}, + {"DNS_TYPE_WKS", Const, 0}, + {"DNS_TYPE_X25", Const, 0}, + {"DT_BLK", Const, 0}, + {"DT_CHR", Const, 0}, + {"DT_DIR", Const, 0}, + {"DT_FIFO", Const, 0}, + {"DT_LNK", Const, 0}, + {"DT_REG", Const, 0}, + {"DT_SOCK", Const, 0}, + {"DT_UNKNOWN", Const, 0}, + {"DT_WHT", Const, 0}, + {"DUPLICATE_CLOSE_SOURCE", Const, 0}, + {"DUPLICATE_SAME_ACCESS", Const, 0}, + {"DeleteFile", Func, 0}, + {"DetachLsf", Func, 0}, + {"DeviceIoControl", Func, 4}, + {"Dirent", Type, 0}, + {"Dirent.Fileno", Field, 0}, + {"Dirent.Ino", Field, 0}, + {"Dirent.Name", Field, 0}, + {"Dirent.Namlen", Field, 0}, + {"Dirent.Off", Field, 0}, + {"Dirent.Pad0", Field, 12}, + {"Dirent.Pad1", Field, 12}, + {"Dirent.Pad_cgo_0", Field, 0}, + {"Dirent.Reclen", Field, 0}, + {"Dirent.Seekoff", Field, 0}, + {"Dirent.Type", Field, 0}, + {"Dirent.X__d_padding", Field, 3}, + {"DnsNameCompare", Func, 4}, + {"DnsQuery", Func, 0}, + {"DnsRecordListFree", Func, 0}, + {"DnsSectionAdditional", Const, 4}, + {"DnsSectionAnswer", Const, 4}, + {"DnsSectionAuthority", Const, 4}, + {"DnsSectionQuestion", Const, 4}, + {"Dup", Func, 0}, + {"Dup2", Func, 0}, + {"Dup3", Func, 2}, + {"DuplicateHandle", Func, 0}, + {"E2BIG", Const, 0}, + {"EACCES", Const, 0}, + {"EADDRINUSE", Const, 0}, + {"EADDRNOTAVAIL", Const, 0}, + {"EADV", Const, 0}, + {"EAFNOSUPPORT", Const, 0}, + {"EAGAIN", Const, 0}, + {"EALREADY", Const, 0}, + {"EAUTH", Const, 0}, + {"EBADARCH", Const, 0}, + {"EBADE", Const, 0}, + {"EBADEXEC", Const, 0}, + {"EBADF", Const, 0}, + {"EBADFD", Const, 0}, + {"EBADMACHO", Const, 0}, + {"EBADMSG", Const, 0}, + {"EBADR", Const, 0}, + {"EBADRPC", Const, 0}, + {"EBADRQC", Const, 0}, + {"EBADSLT", Const, 0}, + {"EBFONT", Const, 0}, + {"EBUSY", Const, 0}, + {"ECANCELED", Const, 0}, + {"ECAPMODE", Const, 1}, + {"ECHILD", Const, 0}, + {"ECHO", Const, 0}, + {"ECHOCTL", Const, 0}, + {"ECHOE", Const, 0}, + {"ECHOK", Const, 0}, + {"ECHOKE", Const, 0}, + {"ECHONL", Const, 0}, + {"ECHOPRT", Const, 0}, + {"ECHRNG", Const, 0}, + {"ECOMM", Const, 0}, + {"ECONNABORTED", Const, 0}, + {"ECONNREFUSED", Const, 0}, + {"ECONNRESET", Const, 0}, + {"EDEADLK", Const, 0}, + {"EDEADLOCK", Const, 0}, + {"EDESTADDRREQ", Const, 0}, + {"EDEVERR", Const, 0}, + {"EDOM", Const, 0}, + {"EDOOFUS", Const, 0}, + {"EDOTDOT", Const, 0}, + {"EDQUOT", Const, 0}, + {"EEXIST", Const, 0}, + {"EFAULT", Const, 0}, + {"EFBIG", Const, 0}, + {"EFER_LMA", Const, 1}, + {"EFER_LME", Const, 1}, + {"EFER_NXE", Const, 1}, + {"EFER_SCE", Const, 1}, + {"EFTYPE", Const, 0}, + {"EHOSTDOWN", Const, 0}, + {"EHOSTUNREACH", Const, 0}, + {"EHWPOISON", Const, 0}, + {"EIDRM", Const, 0}, + {"EILSEQ", Const, 0}, + {"EINPROGRESS", Const, 0}, + {"EINTR", Const, 0}, + {"EINVAL", Const, 0}, + {"EIO", Const, 0}, + {"EIPSEC", Const, 1}, + {"EISCONN", Const, 0}, + {"EISDIR", Const, 0}, + {"EISNAM", Const, 0}, + {"EKEYEXPIRED", Const, 0}, + {"EKEYREJECTED", Const, 0}, + {"EKEYREVOKED", Const, 0}, + {"EL2HLT", Const, 0}, + {"EL2NSYNC", Const, 0}, + {"EL3HLT", Const, 0}, + {"EL3RST", Const, 0}, + {"ELAST", Const, 0}, + {"ELF_NGREG", Const, 0}, + {"ELF_PRARGSZ", Const, 0}, + {"ELIBACC", Const, 0}, + {"ELIBBAD", Const, 0}, + {"ELIBEXEC", Const, 0}, + {"ELIBMAX", Const, 0}, + {"ELIBSCN", Const, 0}, + {"ELNRNG", Const, 0}, + {"ELOOP", Const, 0}, + 
{"EMEDIUMTYPE", Const, 0}, + {"EMFILE", Const, 0}, + {"EMLINK", Const, 0}, + {"EMSGSIZE", Const, 0}, + {"EMT_TAGOVF", Const, 1}, + {"EMULTIHOP", Const, 0}, + {"EMUL_ENABLED", Const, 1}, + {"EMUL_LINUX", Const, 1}, + {"EMUL_LINUX32", Const, 1}, + {"EMUL_MAXID", Const, 1}, + {"EMUL_NATIVE", Const, 1}, + {"ENAMETOOLONG", Const, 0}, + {"ENAVAIL", Const, 0}, + {"ENDRUNDISC", Const, 1}, + {"ENEEDAUTH", Const, 0}, + {"ENETDOWN", Const, 0}, + {"ENETRESET", Const, 0}, + {"ENETUNREACH", Const, 0}, + {"ENFILE", Const, 0}, + {"ENOANO", Const, 0}, + {"ENOATTR", Const, 0}, + {"ENOBUFS", Const, 0}, + {"ENOCSI", Const, 0}, + {"ENODATA", Const, 0}, + {"ENODEV", Const, 0}, + {"ENOENT", Const, 0}, + {"ENOEXEC", Const, 0}, + {"ENOKEY", Const, 0}, + {"ENOLCK", Const, 0}, + {"ENOLINK", Const, 0}, + {"ENOMEDIUM", Const, 0}, + {"ENOMEM", Const, 0}, + {"ENOMSG", Const, 0}, + {"ENONET", Const, 0}, + {"ENOPKG", Const, 0}, + {"ENOPOLICY", Const, 0}, + {"ENOPROTOOPT", Const, 0}, + {"ENOSPC", Const, 0}, + {"ENOSR", Const, 0}, + {"ENOSTR", Const, 0}, + {"ENOSYS", Const, 0}, + {"ENOTBLK", Const, 0}, + {"ENOTCAPABLE", Const, 0}, + {"ENOTCONN", Const, 0}, + {"ENOTDIR", Const, 0}, + {"ENOTEMPTY", Const, 0}, + {"ENOTNAM", Const, 0}, + {"ENOTRECOVERABLE", Const, 0}, + {"ENOTSOCK", Const, 0}, + {"ENOTSUP", Const, 0}, + {"ENOTTY", Const, 0}, + {"ENOTUNIQ", Const, 0}, + {"ENXIO", Const, 0}, + {"EN_SW_CTL_INF", Const, 1}, + {"EN_SW_CTL_PREC", Const, 1}, + {"EN_SW_CTL_ROUND", Const, 1}, + {"EN_SW_DATACHAIN", Const, 1}, + {"EN_SW_DENORM", Const, 1}, + {"EN_SW_INVOP", Const, 1}, + {"EN_SW_OVERFLOW", Const, 1}, + {"EN_SW_PRECLOSS", Const, 1}, + {"EN_SW_UNDERFLOW", Const, 1}, + {"EN_SW_ZERODIV", Const, 1}, + {"EOPNOTSUPP", Const, 0}, + {"EOVERFLOW", Const, 0}, + {"EOWNERDEAD", Const, 0}, + {"EPERM", Const, 0}, + {"EPFNOSUPPORT", Const, 0}, + {"EPIPE", Const, 0}, + {"EPOLLERR", Const, 0}, + {"EPOLLET", Const, 0}, + {"EPOLLHUP", Const, 0}, + {"EPOLLIN", Const, 0}, + {"EPOLLMSG", Const, 0}, + {"EPOLLONESHOT", Const, 0}, + {"EPOLLOUT", Const, 0}, + {"EPOLLPRI", Const, 0}, + {"EPOLLRDBAND", Const, 0}, + {"EPOLLRDHUP", Const, 0}, + {"EPOLLRDNORM", Const, 0}, + {"EPOLLWRBAND", Const, 0}, + {"EPOLLWRNORM", Const, 0}, + {"EPOLL_CLOEXEC", Const, 0}, + {"EPOLL_CTL_ADD", Const, 0}, + {"EPOLL_CTL_DEL", Const, 0}, + {"EPOLL_CTL_MOD", Const, 0}, + {"EPOLL_NONBLOCK", Const, 0}, + {"EPROCLIM", Const, 0}, + {"EPROCUNAVAIL", Const, 0}, + {"EPROGMISMATCH", Const, 0}, + {"EPROGUNAVAIL", Const, 0}, + {"EPROTO", Const, 0}, + {"EPROTONOSUPPORT", Const, 0}, + {"EPROTOTYPE", Const, 0}, + {"EPWROFF", Const, 0}, + {"EQFULL", Const, 16}, + {"ERANGE", Const, 0}, + {"EREMCHG", Const, 0}, + {"EREMOTE", Const, 0}, + {"EREMOTEIO", Const, 0}, + {"ERESTART", Const, 0}, + {"ERFKILL", Const, 0}, + {"EROFS", Const, 0}, + {"ERPCMISMATCH", Const, 0}, + {"ERROR_ACCESS_DENIED", Const, 0}, + {"ERROR_ALREADY_EXISTS", Const, 0}, + {"ERROR_BROKEN_PIPE", Const, 0}, + {"ERROR_BUFFER_OVERFLOW", Const, 0}, + {"ERROR_DIR_NOT_EMPTY", Const, 8}, + {"ERROR_ENVVAR_NOT_FOUND", Const, 0}, + {"ERROR_FILE_EXISTS", Const, 0}, + {"ERROR_FILE_NOT_FOUND", Const, 0}, + {"ERROR_HANDLE_EOF", Const, 2}, + {"ERROR_INSUFFICIENT_BUFFER", Const, 0}, + {"ERROR_IO_PENDING", Const, 0}, + {"ERROR_MOD_NOT_FOUND", Const, 0}, + {"ERROR_MORE_DATA", Const, 3}, + {"ERROR_NETNAME_DELETED", Const, 3}, + {"ERROR_NOT_FOUND", Const, 1}, + {"ERROR_NO_MORE_FILES", Const, 0}, + {"ERROR_OPERATION_ABORTED", Const, 0}, + {"ERROR_PATH_NOT_FOUND", Const, 0}, + {"ERROR_PRIVILEGE_NOT_HELD", Const, 4}, + {"ERROR_PROC_NOT_FOUND", 
Const, 0}, + {"ESHLIBVERS", Const, 0}, + {"ESHUTDOWN", Const, 0}, + {"ESOCKTNOSUPPORT", Const, 0}, + {"ESPIPE", Const, 0}, + {"ESRCH", Const, 0}, + {"ESRMNT", Const, 0}, + {"ESTALE", Const, 0}, + {"ESTRPIPE", Const, 0}, + {"ETHERCAP_JUMBO_MTU", Const, 1}, + {"ETHERCAP_VLAN_HWTAGGING", Const, 1}, + {"ETHERCAP_VLAN_MTU", Const, 1}, + {"ETHERMIN", Const, 1}, + {"ETHERMTU", Const, 1}, + {"ETHERMTU_JUMBO", Const, 1}, + {"ETHERTYPE_8023", Const, 1}, + {"ETHERTYPE_AARP", Const, 1}, + {"ETHERTYPE_ACCTON", Const, 1}, + {"ETHERTYPE_AEONIC", Const, 1}, + {"ETHERTYPE_ALPHA", Const, 1}, + {"ETHERTYPE_AMBER", Const, 1}, + {"ETHERTYPE_AMOEBA", Const, 1}, + {"ETHERTYPE_AOE", Const, 1}, + {"ETHERTYPE_APOLLO", Const, 1}, + {"ETHERTYPE_APOLLODOMAIN", Const, 1}, + {"ETHERTYPE_APPLETALK", Const, 1}, + {"ETHERTYPE_APPLITEK", Const, 1}, + {"ETHERTYPE_ARGONAUT", Const, 1}, + {"ETHERTYPE_ARP", Const, 1}, + {"ETHERTYPE_AT", Const, 1}, + {"ETHERTYPE_ATALK", Const, 1}, + {"ETHERTYPE_ATOMIC", Const, 1}, + {"ETHERTYPE_ATT", Const, 1}, + {"ETHERTYPE_ATTSTANFORD", Const, 1}, + {"ETHERTYPE_AUTOPHON", Const, 1}, + {"ETHERTYPE_AXIS", Const, 1}, + {"ETHERTYPE_BCLOOP", Const, 1}, + {"ETHERTYPE_BOFL", Const, 1}, + {"ETHERTYPE_CABLETRON", Const, 1}, + {"ETHERTYPE_CHAOS", Const, 1}, + {"ETHERTYPE_COMDESIGN", Const, 1}, + {"ETHERTYPE_COMPUGRAPHIC", Const, 1}, + {"ETHERTYPE_COUNTERPOINT", Const, 1}, + {"ETHERTYPE_CRONUS", Const, 1}, + {"ETHERTYPE_CRONUSVLN", Const, 1}, + {"ETHERTYPE_DCA", Const, 1}, + {"ETHERTYPE_DDE", Const, 1}, + {"ETHERTYPE_DEBNI", Const, 1}, + {"ETHERTYPE_DECAM", Const, 1}, + {"ETHERTYPE_DECCUST", Const, 1}, + {"ETHERTYPE_DECDIAG", Const, 1}, + {"ETHERTYPE_DECDNS", Const, 1}, + {"ETHERTYPE_DECDTS", Const, 1}, + {"ETHERTYPE_DECEXPER", Const, 1}, + {"ETHERTYPE_DECLAST", Const, 1}, + {"ETHERTYPE_DECLTM", Const, 1}, + {"ETHERTYPE_DECMUMPS", Const, 1}, + {"ETHERTYPE_DECNETBIOS", Const, 1}, + {"ETHERTYPE_DELTACON", Const, 1}, + {"ETHERTYPE_DIDDLE", Const, 1}, + {"ETHERTYPE_DLOG1", Const, 1}, + {"ETHERTYPE_DLOG2", Const, 1}, + {"ETHERTYPE_DN", Const, 1}, + {"ETHERTYPE_DOGFIGHT", Const, 1}, + {"ETHERTYPE_DSMD", Const, 1}, + {"ETHERTYPE_ECMA", Const, 1}, + {"ETHERTYPE_ENCRYPT", Const, 1}, + {"ETHERTYPE_ES", Const, 1}, + {"ETHERTYPE_EXCELAN", Const, 1}, + {"ETHERTYPE_EXPERDATA", Const, 1}, + {"ETHERTYPE_FLIP", Const, 1}, + {"ETHERTYPE_FLOWCONTROL", Const, 1}, + {"ETHERTYPE_FRARP", Const, 1}, + {"ETHERTYPE_GENDYN", Const, 1}, + {"ETHERTYPE_HAYES", Const, 1}, + {"ETHERTYPE_HIPPI_FP", Const, 1}, + {"ETHERTYPE_HITACHI", Const, 1}, + {"ETHERTYPE_HP", Const, 1}, + {"ETHERTYPE_IEEEPUP", Const, 1}, + {"ETHERTYPE_IEEEPUPAT", Const, 1}, + {"ETHERTYPE_IMLBL", Const, 1}, + {"ETHERTYPE_IMLBLDIAG", Const, 1}, + {"ETHERTYPE_IP", Const, 1}, + {"ETHERTYPE_IPAS", Const, 1}, + {"ETHERTYPE_IPV6", Const, 1}, + {"ETHERTYPE_IPX", Const, 1}, + {"ETHERTYPE_IPXNEW", Const, 1}, + {"ETHERTYPE_KALPANA", Const, 1}, + {"ETHERTYPE_LANBRIDGE", Const, 1}, + {"ETHERTYPE_LANPROBE", Const, 1}, + {"ETHERTYPE_LAT", Const, 1}, + {"ETHERTYPE_LBACK", Const, 1}, + {"ETHERTYPE_LITTLE", Const, 1}, + {"ETHERTYPE_LLDP", Const, 1}, + {"ETHERTYPE_LOGICRAFT", Const, 1}, + {"ETHERTYPE_LOOPBACK", Const, 1}, + {"ETHERTYPE_MATRA", Const, 1}, + {"ETHERTYPE_MAX", Const, 1}, + {"ETHERTYPE_MERIT", Const, 1}, + {"ETHERTYPE_MICP", Const, 1}, + {"ETHERTYPE_MOPDL", Const, 1}, + {"ETHERTYPE_MOPRC", Const, 1}, + {"ETHERTYPE_MOTOROLA", Const, 1}, + {"ETHERTYPE_MPLS", Const, 1}, + {"ETHERTYPE_MPLS_MCAST", Const, 1}, + {"ETHERTYPE_MUMPS", Const, 1}, + {"ETHERTYPE_NBPCC", Const, 1}, + 
{"ETHERTYPE_NBPCLAIM", Const, 1}, + {"ETHERTYPE_NBPCLREQ", Const, 1}, + {"ETHERTYPE_NBPCLRSP", Const, 1}, + {"ETHERTYPE_NBPCREQ", Const, 1}, + {"ETHERTYPE_NBPCRSP", Const, 1}, + {"ETHERTYPE_NBPDG", Const, 1}, + {"ETHERTYPE_NBPDGB", Const, 1}, + {"ETHERTYPE_NBPDLTE", Const, 1}, + {"ETHERTYPE_NBPRAR", Const, 1}, + {"ETHERTYPE_NBPRAS", Const, 1}, + {"ETHERTYPE_NBPRST", Const, 1}, + {"ETHERTYPE_NBPSCD", Const, 1}, + {"ETHERTYPE_NBPVCD", Const, 1}, + {"ETHERTYPE_NBS", Const, 1}, + {"ETHERTYPE_NCD", Const, 1}, + {"ETHERTYPE_NESTAR", Const, 1}, + {"ETHERTYPE_NETBEUI", Const, 1}, + {"ETHERTYPE_NOVELL", Const, 1}, + {"ETHERTYPE_NS", Const, 1}, + {"ETHERTYPE_NSAT", Const, 1}, + {"ETHERTYPE_NSCOMPAT", Const, 1}, + {"ETHERTYPE_NTRAILER", Const, 1}, + {"ETHERTYPE_OS9", Const, 1}, + {"ETHERTYPE_OS9NET", Const, 1}, + {"ETHERTYPE_PACER", Const, 1}, + {"ETHERTYPE_PAE", Const, 1}, + {"ETHERTYPE_PCS", Const, 1}, + {"ETHERTYPE_PLANNING", Const, 1}, + {"ETHERTYPE_PPP", Const, 1}, + {"ETHERTYPE_PPPOE", Const, 1}, + {"ETHERTYPE_PPPOEDISC", Const, 1}, + {"ETHERTYPE_PRIMENTS", Const, 1}, + {"ETHERTYPE_PUP", Const, 1}, + {"ETHERTYPE_PUPAT", Const, 1}, + {"ETHERTYPE_QINQ", Const, 1}, + {"ETHERTYPE_RACAL", Const, 1}, + {"ETHERTYPE_RATIONAL", Const, 1}, + {"ETHERTYPE_RAWFR", Const, 1}, + {"ETHERTYPE_RCL", Const, 1}, + {"ETHERTYPE_RDP", Const, 1}, + {"ETHERTYPE_RETIX", Const, 1}, + {"ETHERTYPE_REVARP", Const, 1}, + {"ETHERTYPE_SCA", Const, 1}, + {"ETHERTYPE_SECTRA", Const, 1}, + {"ETHERTYPE_SECUREDATA", Const, 1}, + {"ETHERTYPE_SGITW", Const, 1}, + {"ETHERTYPE_SG_BOUNCE", Const, 1}, + {"ETHERTYPE_SG_DIAG", Const, 1}, + {"ETHERTYPE_SG_NETGAMES", Const, 1}, + {"ETHERTYPE_SG_RESV", Const, 1}, + {"ETHERTYPE_SIMNET", Const, 1}, + {"ETHERTYPE_SLOW", Const, 1}, + {"ETHERTYPE_SLOWPROTOCOLS", Const, 1}, + {"ETHERTYPE_SNA", Const, 1}, + {"ETHERTYPE_SNMP", Const, 1}, + {"ETHERTYPE_SONIX", Const, 1}, + {"ETHERTYPE_SPIDER", Const, 1}, + {"ETHERTYPE_SPRITE", Const, 1}, + {"ETHERTYPE_STP", Const, 1}, + {"ETHERTYPE_TALARIS", Const, 1}, + {"ETHERTYPE_TALARISMC", Const, 1}, + {"ETHERTYPE_TCPCOMP", Const, 1}, + {"ETHERTYPE_TCPSM", Const, 1}, + {"ETHERTYPE_TEC", Const, 1}, + {"ETHERTYPE_TIGAN", Const, 1}, + {"ETHERTYPE_TRAIL", Const, 1}, + {"ETHERTYPE_TRANSETHER", Const, 1}, + {"ETHERTYPE_TYMSHARE", Const, 1}, + {"ETHERTYPE_UBBST", Const, 1}, + {"ETHERTYPE_UBDEBUG", Const, 1}, + {"ETHERTYPE_UBDIAGLOOP", Const, 1}, + {"ETHERTYPE_UBDL", Const, 1}, + {"ETHERTYPE_UBNIU", Const, 1}, + {"ETHERTYPE_UBNMC", Const, 1}, + {"ETHERTYPE_VALID", Const, 1}, + {"ETHERTYPE_VARIAN", Const, 1}, + {"ETHERTYPE_VAXELN", Const, 1}, + {"ETHERTYPE_VEECO", Const, 1}, + {"ETHERTYPE_VEXP", Const, 1}, + {"ETHERTYPE_VGLAB", Const, 1}, + {"ETHERTYPE_VINES", Const, 1}, + {"ETHERTYPE_VINESECHO", Const, 1}, + {"ETHERTYPE_VINESLOOP", Const, 1}, + {"ETHERTYPE_VITAL", Const, 1}, + {"ETHERTYPE_VLAN", Const, 1}, + {"ETHERTYPE_VLTLMAN", Const, 1}, + {"ETHERTYPE_VPROD", Const, 1}, + {"ETHERTYPE_VURESERVED", Const, 1}, + {"ETHERTYPE_WATERLOO", Const, 1}, + {"ETHERTYPE_WELLFLEET", Const, 1}, + {"ETHERTYPE_X25", Const, 1}, + {"ETHERTYPE_X75", Const, 1}, + {"ETHERTYPE_XNSSM", Const, 1}, + {"ETHERTYPE_XTP", Const, 1}, + {"ETHER_ADDR_LEN", Const, 1}, + {"ETHER_ALIGN", Const, 1}, + {"ETHER_CRC_LEN", Const, 1}, + {"ETHER_CRC_POLY_BE", Const, 1}, + {"ETHER_CRC_POLY_LE", Const, 1}, + {"ETHER_HDR_LEN", Const, 1}, + {"ETHER_MAX_DIX_LEN", Const, 1}, + {"ETHER_MAX_LEN", Const, 1}, + {"ETHER_MAX_LEN_JUMBO", Const, 1}, + {"ETHER_MIN_LEN", Const, 1}, + {"ETHER_PPPOE_ENCAP_LEN", Const, 1}, + 
{"ETHER_TYPE_LEN", Const, 1}, + {"ETHER_VLAN_ENCAP_LEN", Const, 1}, + {"ETH_P_1588", Const, 0}, + {"ETH_P_8021Q", Const, 0}, + {"ETH_P_802_2", Const, 0}, + {"ETH_P_802_3", Const, 0}, + {"ETH_P_AARP", Const, 0}, + {"ETH_P_ALL", Const, 0}, + {"ETH_P_AOE", Const, 0}, + {"ETH_P_ARCNET", Const, 0}, + {"ETH_P_ARP", Const, 0}, + {"ETH_P_ATALK", Const, 0}, + {"ETH_P_ATMFATE", Const, 0}, + {"ETH_P_ATMMPOA", Const, 0}, + {"ETH_P_AX25", Const, 0}, + {"ETH_P_BPQ", Const, 0}, + {"ETH_P_CAIF", Const, 0}, + {"ETH_P_CAN", Const, 0}, + {"ETH_P_CONTROL", Const, 0}, + {"ETH_P_CUST", Const, 0}, + {"ETH_P_DDCMP", Const, 0}, + {"ETH_P_DEC", Const, 0}, + {"ETH_P_DIAG", Const, 0}, + {"ETH_P_DNA_DL", Const, 0}, + {"ETH_P_DNA_RC", Const, 0}, + {"ETH_P_DNA_RT", Const, 0}, + {"ETH_P_DSA", Const, 0}, + {"ETH_P_ECONET", Const, 0}, + {"ETH_P_EDSA", Const, 0}, + {"ETH_P_FCOE", Const, 0}, + {"ETH_P_FIP", Const, 0}, + {"ETH_P_HDLC", Const, 0}, + {"ETH_P_IEEE802154", Const, 0}, + {"ETH_P_IEEEPUP", Const, 0}, + {"ETH_P_IEEEPUPAT", Const, 0}, + {"ETH_P_IP", Const, 0}, + {"ETH_P_IPV6", Const, 0}, + {"ETH_P_IPX", Const, 0}, + {"ETH_P_IRDA", Const, 0}, + {"ETH_P_LAT", Const, 0}, + {"ETH_P_LINK_CTL", Const, 0}, + {"ETH_P_LOCALTALK", Const, 0}, + {"ETH_P_LOOP", Const, 0}, + {"ETH_P_MOBITEX", Const, 0}, + {"ETH_P_MPLS_MC", Const, 0}, + {"ETH_P_MPLS_UC", Const, 0}, + {"ETH_P_PAE", Const, 0}, + {"ETH_P_PAUSE", Const, 0}, + {"ETH_P_PHONET", Const, 0}, + {"ETH_P_PPPTALK", Const, 0}, + {"ETH_P_PPP_DISC", Const, 0}, + {"ETH_P_PPP_MP", Const, 0}, + {"ETH_P_PPP_SES", Const, 0}, + {"ETH_P_PUP", Const, 0}, + {"ETH_P_PUPAT", Const, 0}, + {"ETH_P_RARP", Const, 0}, + {"ETH_P_SCA", Const, 0}, + {"ETH_P_SLOW", Const, 0}, + {"ETH_P_SNAP", Const, 0}, + {"ETH_P_TEB", Const, 0}, + {"ETH_P_TIPC", Const, 0}, + {"ETH_P_TRAILER", Const, 0}, + {"ETH_P_TR_802_2", Const, 0}, + {"ETH_P_WAN_PPP", Const, 0}, + {"ETH_P_WCCP", Const, 0}, + {"ETH_P_X25", Const, 0}, + {"ETIME", Const, 0}, + {"ETIMEDOUT", Const, 0}, + {"ETOOMANYREFS", Const, 0}, + {"ETXTBSY", Const, 0}, + {"EUCLEAN", Const, 0}, + {"EUNATCH", Const, 0}, + {"EUSERS", Const, 0}, + {"EVFILT_AIO", Const, 0}, + {"EVFILT_FS", Const, 0}, + {"EVFILT_LIO", Const, 0}, + {"EVFILT_MACHPORT", Const, 0}, + {"EVFILT_PROC", Const, 0}, + {"EVFILT_READ", Const, 0}, + {"EVFILT_SIGNAL", Const, 0}, + {"EVFILT_SYSCOUNT", Const, 0}, + {"EVFILT_THREADMARKER", Const, 0}, + {"EVFILT_TIMER", Const, 0}, + {"EVFILT_USER", Const, 0}, + {"EVFILT_VM", Const, 0}, + {"EVFILT_VNODE", Const, 0}, + {"EVFILT_WRITE", Const, 0}, + {"EV_ADD", Const, 0}, + {"EV_CLEAR", Const, 0}, + {"EV_DELETE", Const, 0}, + {"EV_DISABLE", Const, 0}, + {"EV_DISPATCH", Const, 0}, + {"EV_DROP", Const, 3}, + {"EV_ENABLE", Const, 0}, + {"EV_EOF", Const, 0}, + {"EV_ERROR", Const, 0}, + {"EV_FLAG0", Const, 0}, + {"EV_FLAG1", Const, 0}, + {"EV_ONESHOT", Const, 0}, + {"EV_OOBAND", Const, 0}, + {"EV_POLL", Const, 0}, + {"EV_RECEIPT", Const, 0}, + {"EV_SYSFLAGS", Const, 0}, + {"EWINDOWS", Const, 0}, + {"EWOULDBLOCK", Const, 0}, + {"EXDEV", Const, 0}, + {"EXFULL", Const, 0}, + {"EXTA", Const, 0}, + {"EXTB", Const, 0}, + {"EXTPROC", Const, 0}, + {"Environ", Func, 0}, + {"EpollCreate", Func, 0}, + {"EpollCreate1", Func, 0}, + {"EpollCtl", Func, 0}, + {"EpollEvent", Type, 0}, + {"EpollEvent.Events", Field, 0}, + {"EpollEvent.Fd", Field, 0}, + {"EpollEvent.Pad", Field, 0}, + {"EpollEvent.PadFd", Field, 0}, + {"EpollWait", Func, 0}, + {"Errno", Type, 0}, + {"EscapeArg", Func, 0}, + {"Exchangedata", Func, 0}, + {"Exec", Func, 0}, + {"Exit", Func, 0}, + {"ExitProcess", Func, 
0}, + {"FD_CLOEXEC", Const, 0}, + {"FD_SETSIZE", Const, 0}, + {"FILE_ACTION_ADDED", Const, 0}, + {"FILE_ACTION_MODIFIED", Const, 0}, + {"FILE_ACTION_REMOVED", Const, 0}, + {"FILE_ACTION_RENAMED_NEW_NAME", Const, 0}, + {"FILE_ACTION_RENAMED_OLD_NAME", Const, 0}, + {"FILE_APPEND_DATA", Const, 0}, + {"FILE_ATTRIBUTE_ARCHIVE", Const, 0}, + {"FILE_ATTRIBUTE_DIRECTORY", Const, 0}, + {"FILE_ATTRIBUTE_HIDDEN", Const, 0}, + {"FILE_ATTRIBUTE_NORMAL", Const, 0}, + {"FILE_ATTRIBUTE_READONLY", Const, 0}, + {"FILE_ATTRIBUTE_REPARSE_POINT", Const, 4}, + {"FILE_ATTRIBUTE_SYSTEM", Const, 0}, + {"FILE_BEGIN", Const, 0}, + {"FILE_CURRENT", Const, 0}, + {"FILE_END", Const, 0}, + {"FILE_FLAG_BACKUP_SEMANTICS", Const, 0}, + {"FILE_FLAG_OPEN_REPARSE_POINT", Const, 4}, + {"FILE_FLAG_OVERLAPPED", Const, 0}, + {"FILE_LIST_DIRECTORY", Const, 0}, + {"FILE_MAP_COPY", Const, 0}, + {"FILE_MAP_EXECUTE", Const, 0}, + {"FILE_MAP_READ", Const, 0}, + {"FILE_MAP_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_ATTRIBUTES", Const, 0}, + {"FILE_NOTIFY_CHANGE_CREATION", Const, 0}, + {"FILE_NOTIFY_CHANGE_DIR_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_FILE_NAME", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_ACCESS", Const, 0}, + {"FILE_NOTIFY_CHANGE_LAST_WRITE", Const, 0}, + {"FILE_NOTIFY_CHANGE_SIZE", Const, 0}, + {"FILE_SHARE_DELETE", Const, 0}, + {"FILE_SHARE_READ", Const, 0}, + {"FILE_SHARE_WRITE", Const, 0}, + {"FILE_SKIP_COMPLETION_PORT_ON_SUCCESS", Const, 2}, + {"FILE_SKIP_SET_EVENT_ON_HANDLE", Const, 2}, + {"FILE_TYPE_CHAR", Const, 0}, + {"FILE_TYPE_DISK", Const, 0}, + {"FILE_TYPE_PIPE", Const, 0}, + {"FILE_TYPE_REMOTE", Const, 0}, + {"FILE_TYPE_UNKNOWN", Const, 0}, + {"FILE_WRITE_ATTRIBUTES", Const, 0}, + {"FLUSHO", Const, 0}, + {"FORMAT_MESSAGE_ALLOCATE_BUFFER", Const, 0}, + {"FORMAT_MESSAGE_ARGUMENT_ARRAY", Const, 0}, + {"FORMAT_MESSAGE_FROM_HMODULE", Const, 0}, + {"FORMAT_MESSAGE_FROM_STRING", Const, 0}, + {"FORMAT_MESSAGE_FROM_SYSTEM", Const, 0}, + {"FORMAT_MESSAGE_IGNORE_INSERTS", Const, 0}, + {"FORMAT_MESSAGE_MAX_WIDTH_MASK", Const, 0}, + {"FSCTL_GET_REPARSE_POINT", Const, 4}, + {"F_ADDFILESIGS", Const, 0}, + {"F_ADDSIGS", Const, 0}, + {"F_ALLOCATEALL", Const, 0}, + {"F_ALLOCATECONTIG", Const, 0}, + {"F_CANCEL", Const, 0}, + {"F_CHKCLEAN", Const, 0}, + {"F_CLOSEM", Const, 1}, + {"F_DUP2FD", Const, 0}, + {"F_DUP2FD_CLOEXEC", Const, 1}, + {"F_DUPFD", Const, 0}, + {"F_DUPFD_CLOEXEC", Const, 0}, + {"F_EXLCK", Const, 0}, + {"F_FINDSIGS", Const, 16}, + {"F_FLUSH_DATA", Const, 0}, + {"F_FREEZE_FS", Const, 0}, + {"F_FSCTL", Const, 1}, + {"F_FSDIRMASK", Const, 1}, + {"F_FSIN", Const, 1}, + {"F_FSINOUT", Const, 1}, + {"F_FSOUT", Const, 1}, + {"F_FSPRIV", Const, 1}, + {"F_FSVOID", Const, 1}, + {"F_FULLFSYNC", Const, 0}, + {"F_GETCODEDIR", Const, 16}, + {"F_GETFD", Const, 0}, + {"F_GETFL", Const, 0}, + {"F_GETLEASE", Const, 0}, + {"F_GETLK", Const, 0}, + {"F_GETLK64", Const, 0}, + {"F_GETLKPID", Const, 0}, + {"F_GETNOSIGPIPE", Const, 0}, + {"F_GETOWN", Const, 0}, + {"F_GETOWN_EX", Const, 0}, + {"F_GETPATH", Const, 0}, + {"F_GETPATH_MTMINFO", Const, 0}, + {"F_GETPIPE_SZ", Const, 0}, + {"F_GETPROTECTIONCLASS", Const, 0}, + {"F_GETPROTECTIONLEVEL", Const, 16}, + {"F_GETSIG", Const, 0}, + {"F_GLOBAL_NOCACHE", Const, 0}, + {"F_LOCK", Const, 0}, + {"F_LOG2PHYS", Const, 0}, + {"F_LOG2PHYS_EXT", Const, 0}, + {"F_MARKDEPENDENCY", Const, 0}, + {"F_MAXFD", Const, 1}, + {"F_NOCACHE", Const, 0}, + {"F_NODIRECT", Const, 0}, + {"F_NOTIFY", Const, 0}, + {"F_OGETLK", Const, 0}, + {"F_OK", Const, 0}, + {"F_OSETLK", Const, 0}, + {"F_OSETLKW", Const, 0}, + 
{"F_PARAM_MASK", Const, 1}, + {"F_PARAM_MAX", Const, 1}, + {"F_PATHPKG_CHECK", Const, 0}, + {"F_PEOFPOSMODE", Const, 0}, + {"F_PREALLOCATE", Const, 0}, + {"F_RDADVISE", Const, 0}, + {"F_RDAHEAD", Const, 0}, + {"F_RDLCK", Const, 0}, + {"F_READAHEAD", Const, 0}, + {"F_READBOOTSTRAP", Const, 0}, + {"F_SETBACKINGSTORE", Const, 0}, + {"F_SETFD", Const, 0}, + {"F_SETFL", Const, 0}, + {"F_SETLEASE", Const, 0}, + {"F_SETLK", Const, 0}, + {"F_SETLK64", Const, 0}, + {"F_SETLKW", Const, 0}, + {"F_SETLKW64", Const, 0}, + {"F_SETLKWTIMEOUT", Const, 16}, + {"F_SETLK_REMOTE", Const, 0}, + {"F_SETNOSIGPIPE", Const, 0}, + {"F_SETOWN", Const, 0}, + {"F_SETOWN_EX", Const, 0}, + {"F_SETPIPE_SZ", Const, 0}, + {"F_SETPROTECTIONCLASS", Const, 0}, + {"F_SETSIG", Const, 0}, + {"F_SETSIZE", Const, 0}, + {"F_SHLCK", Const, 0}, + {"F_SINGLE_WRITER", Const, 16}, + {"F_TEST", Const, 0}, + {"F_THAW_FS", Const, 0}, + {"F_TLOCK", Const, 0}, + {"F_TRANSCODEKEY", Const, 16}, + {"F_ULOCK", Const, 0}, + {"F_UNLCK", Const, 0}, + {"F_UNLCKSYS", Const, 0}, + {"F_VOLPOSMODE", Const, 0}, + {"F_WRITEBOOTSTRAP", Const, 0}, + {"F_WRLCK", Const, 0}, + {"Faccessat", Func, 0}, + {"Fallocate", Func, 0}, + {"Fbootstraptransfer_t", Type, 0}, + {"Fbootstraptransfer_t.Buffer", Field, 0}, + {"Fbootstraptransfer_t.Length", Field, 0}, + {"Fbootstraptransfer_t.Offset", Field, 0}, + {"Fchdir", Func, 0}, + {"Fchflags", Func, 0}, + {"Fchmod", Func, 0}, + {"Fchmodat", Func, 0}, + {"Fchown", Func, 0}, + {"Fchownat", Func, 0}, + {"FcntlFlock", Func, 3}, + {"FdSet", Type, 0}, + {"FdSet.Bits", Field, 0}, + {"FdSet.X__fds_bits", Field, 0}, + {"Fdatasync", Func, 0}, + {"FileNotifyInformation", Type, 0}, + {"FileNotifyInformation.Action", Field, 0}, + {"FileNotifyInformation.FileName", Field, 0}, + {"FileNotifyInformation.FileNameLength", Field, 0}, + {"FileNotifyInformation.NextEntryOffset", Field, 0}, + {"Filetime", Type, 0}, + {"Filetime.HighDateTime", Field, 0}, + {"Filetime.LowDateTime", Field, 0}, + {"FindClose", Func, 0}, + {"FindFirstFile", Func, 0}, + {"FindNextFile", Func, 0}, + {"Flock", Func, 0}, + {"Flock_t", Type, 0}, + {"Flock_t.Len", Field, 0}, + {"Flock_t.Pad_cgo_0", Field, 0}, + {"Flock_t.Pad_cgo_1", Field, 3}, + {"Flock_t.Pid", Field, 0}, + {"Flock_t.Start", Field, 0}, + {"Flock_t.Sysid", Field, 0}, + {"Flock_t.Type", Field, 0}, + {"Flock_t.Whence", Field, 0}, + {"FlushBpf", Func, 0}, + {"FlushFileBuffers", Func, 0}, + {"FlushViewOfFile", Func, 0}, + {"ForkExec", Func, 0}, + {"ForkLock", Var, 0}, + {"FormatMessage", Func, 0}, + {"Fpathconf", Func, 0}, + {"FreeAddrInfoW", Func, 1}, + {"FreeEnvironmentStrings", Func, 0}, + {"FreeLibrary", Func, 0}, + {"Fsid", Type, 0}, + {"Fsid.Val", Field, 0}, + {"Fsid.X__fsid_val", Field, 2}, + {"Fsid.X__val", Field, 0}, + {"Fstat", Func, 0}, + {"Fstatat", Func, 12}, + {"Fstatfs", Func, 0}, + {"Fstore_t", Type, 0}, + {"Fstore_t.Bytesalloc", Field, 0}, + {"Fstore_t.Flags", Field, 0}, + {"Fstore_t.Length", Field, 0}, + {"Fstore_t.Offset", Field, 0}, + {"Fstore_t.Posmode", Field, 0}, + {"Fsync", Func, 0}, + {"Ftruncate", Func, 0}, + {"FullPath", Func, 4}, + {"Futimes", Func, 0}, + {"Futimesat", Func, 0}, + {"GENERIC_ALL", Const, 0}, + {"GENERIC_EXECUTE", Const, 0}, + {"GENERIC_READ", Const, 0}, + {"GENERIC_WRITE", Const, 0}, + {"GUID", Type, 1}, + {"GUID.Data1", Field, 1}, + {"GUID.Data2", Field, 1}, + {"GUID.Data3", Field, 1}, + {"GUID.Data4", Field, 1}, + {"GetAcceptExSockaddrs", Func, 0}, + {"GetAdaptersInfo", Func, 0}, + {"GetAddrInfoW", Func, 1}, + {"GetCommandLine", Func, 0}, + {"GetComputerName", 
Func, 0}, + {"GetConsoleMode", Func, 1}, + {"GetCurrentDirectory", Func, 0}, + {"GetCurrentProcess", Func, 0}, + {"GetEnvironmentStrings", Func, 0}, + {"GetEnvironmentVariable", Func, 0}, + {"GetExitCodeProcess", Func, 0}, + {"GetFileAttributes", Func, 0}, + {"GetFileAttributesEx", Func, 0}, + {"GetFileExInfoStandard", Const, 0}, + {"GetFileExMaxInfoLevel", Const, 0}, + {"GetFileInformationByHandle", Func, 0}, + {"GetFileType", Func, 0}, + {"GetFullPathName", Func, 0}, + {"GetHostByName", Func, 0}, + {"GetIfEntry", Func, 0}, + {"GetLastError", Func, 0}, + {"GetLengthSid", Func, 0}, + {"GetLongPathName", Func, 0}, + {"GetProcAddress", Func, 0}, + {"GetProcessTimes", Func, 0}, + {"GetProtoByName", Func, 0}, + {"GetQueuedCompletionStatus", Func, 0}, + {"GetServByName", Func, 0}, + {"GetShortPathName", Func, 0}, + {"GetStartupInfo", Func, 0}, + {"GetStdHandle", Func, 0}, + {"GetSystemTimeAsFileTime", Func, 0}, + {"GetTempPath", Func, 0}, + {"GetTimeZoneInformation", Func, 0}, + {"GetTokenInformation", Func, 0}, + {"GetUserNameEx", Func, 0}, + {"GetUserProfileDirectory", Func, 0}, + {"GetVersion", Func, 0}, + {"Getcwd", Func, 0}, + {"Getdents", Func, 0}, + {"Getdirentries", Func, 0}, + {"Getdtablesize", Func, 0}, + {"Getegid", Func, 0}, + {"Getenv", Func, 0}, + {"Geteuid", Func, 0}, + {"Getfsstat", Func, 0}, + {"Getgid", Func, 0}, + {"Getgroups", Func, 0}, + {"Getpagesize", Func, 0}, + {"Getpeername", Func, 0}, + {"Getpgid", Func, 0}, + {"Getpgrp", Func, 0}, + {"Getpid", Func, 0}, + {"Getppid", Func, 0}, + {"Getpriority", Func, 0}, + {"Getrlimit", Func, 0}, + {"Getrusage", Func, 0}, + {"Getsid", Func, 0}, + {"Getsockname", Func, 0}, + {"Getsockopt", Func, 1}, + {"GetsockoptByte", Func, 0}, + {"GetsockoptICMPv6Filter", Func, 2}, + {"GetsockoptIPMreq", Func, 0}, + {"GetsockoptIPMreqn", Func, 0}, + {"GetsockoptIPv6MTUInfo", Func, 2}, + {"GetsockoptIPv6Mreq", Func, 0}, + {"GetsockoptInet4Addr", Func, 0}, + {"GetsockoptInt", Func, 0}, + {"GetsockoptUcred", Func, 1}, + {"Gettid", Func, 0}, + {"Gettimeofday", Func, 0}, + {"Getuid", Func, 0}, + {"Getwd", Func, 0}, + {"Getxattr", Func, 1}, + {"HANDLE_FLAG_INHERIT", Const, 0}, + {"HKEY_CLASSES_ROOT", Const, 0}, + {"HKEY_CURRENT_CONFIG", Const, 0}, + {"HKEY_CURRENT_USER", Const, 0}, + {"HKEY_DYN_DATA", Const, 0}, + {"HKEY_LOCAL_MACHINE", Const, 0}, + {"HKEY_PERFORMANCE_DATA", Const, 0}, + {"HKEY_USERS", Const, 0}, + {"HUPCL", Const, 0}, + {"Handle", Type, 0}, + {"Hostent", Type, 0}, + {"Hostent.AddrList", Field, 0}, + {"Hostent.AddrType", Field, 0}, + {"Hostent.Aliases", Field, 0}, + {"Hostent.Length", Field, 0}, + {"Hostent.Name", Field, 0}, + {"ICANON", Const, 0}, + {"ICMP6_FILTER", Const, 2}, + {"ICMPV6_FILTER", Const, 2}, + {"ICMPv6Filter", Type, 2}, + {"ICMPv6Filter.Data", Field, 2}, + {"ICMPv6Filter.Filt", Field, 2}, + {"ICRNL", Const, 0}, + {"IEXTEN", Const, 0}, + {"IFAN_ARRIVAL", Const, 1}, + {"IFAN_DEPARTURE", Const, 1}, + {"IFA_ADDRESS", Const, 0}, + {"IFA_ANYCAST", Const, 0}, + {"IFA_BROADCAST", Const, 0}, + {"IFA_CACHEINFO", Const, 0}, + {"IFA_F_DADFAILED", Const, 0}, + {"IFA_F_DEPRECATED", Const, 0}, + {"IFA_F_HOMEADDRESS", Const, 0}, + {"IFA_F_NODAD", Const, 0}, + {"IFA_F_OPTIMISTIC", Const, 0}, + {"IFA_F_PERMANENT", Const, 0}, + {"IFA_F_SECONDARY", Const, 0}, + {"IFA_F_TEMPORARY", Const, 0}, + {"IFA_F_TENTATIVE", Const, 0}, + {"IFA_LABEL", Const, 0}, + {"IFA_LOCAL", Const, 0}, + {"IFA_MAX", Const, 0}, + {"IFA_MULTICAST", Const, 0}, + {"IFA_ROUTE", Const, 1}, + {"IFA_UNSPEC", Const, 0}, + {"IFF_ALLMULTI", Const, 0}, + {"IFF_ALTPHYS", Const, 
0}, + {"IFF_AUTOMEDIA", Const, 0}, + {"IFF_BROADCAST", Const, 0}, + {"IFF_CANTCHANGE", Const, 0}, + {"IFF_CANTCONFIG", Const, 1}, + {"IFF_DEBUG", Const, 0}, + {"IFF_DRV_OACTIVE", Const, 0}, + {"IFF_DRV_RUNNING", Const, 0}, + {"IFF_DYING", Const, 0}, + {"IFF_DYNAMIC", Const, 0}, + {"IFF_LINK0", Const, 0}, + {"IFF_LINK1", Const, 0}, + {"IFF_LINK2", Const, 0}, + {"IFF_LOOPBACK", Const, 0}, + {"IFF_MASTER", Const, 0}, + {"IFF_MONITOR", Const, 0}, + {"IFF_MULTICAST", Const, 0}, + {"IFF_NOARP", Const, 0}, + {"IFF_NOTRAILERS", Const, 0}, + {"IFF_NO_PI", Const, 0}, + {"IFF_OACTIVE", Const, 0}, + {"IFF_ONE_QUEUE", Const, 0}, + {"IFF_POINTOPOINT", Const, 0}, + {"IFF_POINTTOPOINT", Const, 0}, + {"IFF_PORTSEL", Const, 0}, + {"IFF_PPROMISC", Const, 0}, + {"IFF_PROMISC", Const, 0}, + {"IFF_RENAMING", Const, 0}, + {"IFF_RUNNING", Const, 0}, + {"IFF_SIMPLEX", Const, 0}, + {"IFF_SLAVE", Const, 0}, + {"IFF_SMART", Const, 0}, + {"IFF_STATICARP", Const, 0}, + {"IFF_TAP", Const, 0}, + {"IFF_TUN", Const, 0}, + {"IFF_TUN_EXCL", Const, 0}, + {"IFF_UP", Const, 0}, + {"IFF_VNET_HDR", Const, 0}, + {"IFLA_ADDRESS", Const, 0}, + {"IFLA_BROADCAST", Const, 0}, + {"IFLA_COST", Const, 0}, + {"IFLA_IFALIAS", Const, 0}, + {"IFLA_IFNAME", Const, 0}, + {"IFLA_LINK", Const, 0}, + {"IFLA_LINKINFO", Const, 0}, + {"IFLA_LINKMODE", Const, 0}, + {"IFLA_MAP", Const, 0}, + {"IFLA_MASTER", Const, 0}, + {"IFLA_MAX", Const, 0}, + {"IFLA_MTU", Const, 0}, + {"IFLA_NET_NS_PID", Const, 0}, + {"IFLA_OPERSTATE", Const, 0}, + {"IFLA_PRIORITY", Const, 0}, + {"IFLA_PROTINFO", Const, 0}, + {"IFLA_QDISC", Const, 0}, + {"IFLA_STATS", Const, 0}, + {"IFLA_TXQLEN", Const, 0}, + {"IFLA_UNSPEC", Const, 0}, + {"IFLA_WEIGHT", Const, 0}, + {"IFLA_WIRELESS", Const, 0}, + {"IFNAMSIZ", Const, 0}, + {"IFT_1822", Const, 0}, + {"IFT_A12MPPSWITCH", Const, 0}, + {"IFT_AAL2", Const, 0}, + {"IFT_AAL5", Const, 0}, + {"IFT_ADSL", Const, 0}, + {"IFT_AFLANE8023", Const, 0}, + {"IFT_AFLANE8025", Const, 0}, + {"IFT_ARAP", Const, 0}, + {"IFT_ARCNET", Const, 0}, + {"IFT_ARCNETPLUS", Const, 0}, + {"IFT_ASYNC", Const, 0}, + {"IFT_ATM", Const, 0}, + {"IFT_ATMDXI", Const, 0}, + {"IFT_ATMFUNI", Const, 0}, + {"IFT_ATMIMA", Const, 0}, + {"IFT_ATMLOGICAL", Const, 0}, + {"IFT_ATMRADIO", Const, 0}, + {"IFT_ATMSUBINTERFACE", Const, 0}, + {"IFT_ATMVCIENDPT", Const, 0}, + {"IFT_ATMVIRTUAL", Const, 0}, + {"IFT_BGPPOLICYACCOUNTING", Const, 0}, + {"IFT_BLUETOOTH", Const, 1}, + {"IFT_BRIDGE", Const, 0}, + {"IFT_BSC", Const, 0}, + {"IFT_CARP", Const, 0}, + {"IFT_CCTEMUL", Const, 0}, + {"IFT_CELLULAR", Const, 0}, + {"IFT_CEPT", Const, 0}, + {"IFT_CES", Const, 0}, + {"IFT_CHANNEL", Const, 0}, + {"IFT_CNR", Const, 0}, + {"IFT_COFFEE", Const, 0}, + {"IFT_COMPOSITELINK", Const, 0}, + {"IFT_DCN", Const, 0}, + {"IFT_DIGITALPOWERLINE", Const, 0}, + {"IFT_DIGITALWRAPPEROVERHEADCHANNEL", Const, 0}, + {"IFT_DLSW", Const, 0}, + {"IFT_DOCSCABLEDOWNSTREAM", Const, 0}, + {"IFT_DOCSCABLEMACLAYER", Const, 0}, + {"IFT_DOCSCABLEUPSTREAM", Const, 0}, + {"IFT_DOCSCABLEUPSTREAMCHANNEL", Const, 1}, + {"IFT_DS0", Const, 0}, + {"IFT_DS0BUNDLE", Const, 0}, + {"IFT_DS1FDL", Const, 0}, + {"IFT_DS3", Const, 0}, + {"IFT_DTM", Const, 0}, + {"IFT_DUMMY", Const, 1}, + {"IFT_DVBASILN", Const, 0}, + {"IFT_DVBASIOUT", Const, 0}, + {"IFT_DVBRCCDOWNSTREAM", Const, 0}, + {"IFT_DVBRCCMACLAYER", Const, 0}, + {"IFT_DVBRCCUPSTREAM", Const, 0}, + {"IFT_ECONET", Const, 1}, + {"IFT_ENC", Const, 0}, + {"IFT_EON", Const, 0}, + {"IFT_EPLRS", Const, 0}, + {"IFT_ESCON", Const, 0}, + {"IFT_ETHER", Const, 0}, + {"IFT_FAITH", Const, 0}, + 
{"IFT_FAST", Const, 0}, + {"IFT_FASTETHER", Const, 0}, + {"IFT_FASTETHERFX", Const, 0}, + {"IFT_FDDI", Const, 0}, + {"IFT_FIBRECHANNEL", Const, 0}, + {"IFT_FRAMERELAYINTERCONNECT", Const, 0}, + {"IFT_FRAMERELAYMPI", Const, 0}, + {"IFT_FRDLCIENDPT", Const, 0}, + {"IFT_FRELAY", Const, 0}, + {"IFT_FRELAYDCE", Const, 0}, + {"IFT_FRF16MFRBUNDLE", Const, 0}, + {"IFT_FRFORWARD", Const, 0}, + {"IFT_G703AT2MB", Const, 0}, + {"IFT_G703AT64K", Const, 0}, + {"IFT_GIF", Const, 0}, + {"IFT_GIGABITETHERNET", Const, 0}, + {"IFT_GR303IDT", Const, 0}, + {"IFT_GR303RDT", Const, 0}, + {"IFT_H323GATEKEEPER", Const, 0}, + {"IFT_H323PROXY", Const, 0}, + {"IFT_HDH1822", Const, 0}, + {"IFT_HDLC", Const, 0}, + {"IFT_HDSL2", Const, 0}, + {"IFT_HIPERLAN2", Const, 0}, + {"IFT_HIPPI", Const, 0}, + {"IFT_HIPPIINTERFACE", Const, 0}, + {"IFT_HOSTPAD", Const, 0}, + {"IFT_HSSI", Const, 0}, + {"IFT_HY", Const, 0}, + {"IFT_IBM370PARCHAN", Const, 0}, + {"IFT_IDSL", Const, 0}, + {"IFT_IEEE1394", Const, 0}, + {"IFT_IEEE80211", Const, 0}, + {"IFT_IEEE80212", Const, 0}, + {"IFT_IEEE8023ADLAG", Const, 0}, + {"IFT_IFGSN", Const, 0}, + {"IFT_IMT", Const, 0}, + {"IFT_INFINIBAND", Const, 1}, + {"IFT_INTERLEAVE", Const, 0}, + {"IFT_IP", Const, 0}, + {"IFT_IPFORWARD", Const, 0}, + {"IFT_IPOVERATM", Const, 0}, + {"IFT_IPOVERCDLC", Const, 0}, + {"IFT_IPOVERCLAW", Const, 0}, + {"IFT_IPSWITCH", Const, 0}, + {"IFT_IPXIP", Const, 0}, + {"IFT_ISDN", Const, 0}, + {"IFT_ISDNBASIC", Const, 0}, + {"IFT_ISDNPRIMARY", Const, 0}, + {"IFT_ISDNS", Const, 0}, + {"IFT_ISDNU", Const, 0}, + {"IFT_ISO88022LLC", Const, 0}, + {"IFT_ISO88023", Const, 0}, + {"IFT_ISO88024", Const, 0}, + {"IFT_ISO88025", Const, 0}, + {"IFT_ISO88025CRFPINT", Const, 0}, + {"IFT_ISO88025DTR", Const, 0}, + {"IFT_ISO88025FIBER", Const, 0}, + {"IFT_ISO88026", Const, 0}, + {"IFT_ISUP", Const, 0}, + {"IFT_L2VLAN", Const, 0}, + {"IFT_L3IPVLAN", Const, 0}, + {"IFT_L3IPXVLAN", Const, 0}, + {"IFT_LAPB", Const, 0}, + {"IFT_LAPD", Const, 0}, + {"IFT_LAPF", Const, 0}, + {"IFT_LINEGROUP", Const, 1}, + {"IFT_LOCALTALK", Const, 0}, + {"IFT_LOOP", Const, 0}, + {"IFT_MEDIAMAILOVERIP", Const, 0}, + {"IFT_MFSIGLINK", Const, 0}, + {"IFT_MIOX25", Const, 0}, + {"IFT_MODEM", Const, 0}, + {"IFT_MPC", Const, 0}, + {"IFT_MPLS", Const, 0}, + {"IFT_MPLSTUNNEL", Const, 0}, + {"IFT_MSDSL", Const, 0}, + {"IFT_MVL", Const, 0}, + {"IFT_MYRINET", Const, 0}, + {"IFT_NFAS", Const, 0}, + {"IFT_NSIP", Const, 0}, + {"IFT_OPTICALCHANNEL", Const, 0}, + {"IFT_OPTICALTRANSPORT", Const, 0}, + {"IFT_OTHER", Const, 0}, + {"IFT_P10", Const, 0}, + {"IFT_P80", Const, 0}, + {"IFT_PARA", Const, 0}, + {"IFT_PDP", Const, 0}, + {"IFT_PFLOG", Const, 0}, + {"IFT_PFLOW", Const, 1}, + {"IFT_PFSYNC", Const, 0}, + {"IFT_PLC", Const, 0}, + {"IFT_PON155", Const, 1}, + {"IFT_PON622", Const, 1}, + {"IFT_POS", Const, 0}, + {"IFT_PPP", Const, 0}, + {"IFT_PPPMULTILINKBUNDLE", Const, 0}, + {"IFT_PROPATM", Const, 1}, + {"IFT_PROPBWAP2MP", Const, 0}, + {"IFT_PROPCNLS", Const, 0}, + {"IFT_PROPDOCSWIRELESSDOWNSTREAM", Const, 0}, + {"IFT_PROPDOCSWIRELESSMACLAYER", Const, 0}, + {"IFT_PROPDOCSWIRELESSUPSTREAM", Const, 0}, + {"IFT_PROPMUX", Const, 0}, + {"IFT_PROPVIRTUAL", Const, 0}, + {"IFT_PROPWIRELESSP2P", Const, 0}, + {"IFT_PTPSERIAL", Const, 0}, + {"IFT_PVC", Const, 0}, + {"IFT_Q2931", Const, 1}, + {"IFT_QLLC", Const, 0}, + {"IFT_RADIOMAC", Const, 0}, + {"IFT_RADSL", Const, 0}, + {"IFT_REACHDSL", Const, 0}, + {"IFT_RFC1483", Const, 0}, + {"IFT_RS232", Const, 0}, + {"IFT_RSRB", Const, 0}, + {"IFT_SDLC", Const, 0}, + {"IFT_SDSL", Const, 0}, + 
{"IFT_SHDSL", Const, 0}, + {"IFT_SIP", Const, 0}, + {"IFT_SIPSIG", Const, 1}, + {"IFT_SIPTG", Const, 1}, + {"IFT_SLIP", Const, 0}, + {"IFT_SMDSDXI", Const, 0}, + {"IFT_SMDSICIP", Const, 0}, + {"IFT_SONET", Const, 0}, + {"IFT_SONETOVERHEADCHANNEL", Const, 0}, + {"IFT_SONETPATH", Const, 0}, + {"IFT_SONETVT", Const, 0}, + {"IFT_SRP", Const, 0}, + {"IFT_SS7SIGLINK", Const, 0}, + {"IFT_STACKTOSTACK", Const, 0}, + {"IFT_STARLAN", Const, 0}, + {"IFT_STF", Const, 0}, + {"IFT_T1", Const, 0}, + {"IFT_TDLC", Const, 0}, + {"IFT_TELINK", Const, 1}, + {"IFT_TERMPAD", Const, 0}, + {"IFT_TR008", Const, 0}, + {"IFT_TRANSPHDLC", Const, 0}, + {"IFT_TUNNEL", Const, 0}, + {"IFT_ULTRA", Const, 0}, + {"IFT_USB", Const, 0}, + {"IFT_V11", Const, 0}, + {"IFT_V35", Const, 0}, + {"IFT_V36", Const, 0}, + {"IFT_V37", Const, 0}, + {"IFT_VDSL", Const, 0}, + {"IFT_VIRTUALIPADDRESS", Const, 0}, + {"IFT_VIRTUALTG", Const, 1}, + {"IFT_VOICEDID", Const, 1}, + {"IFT_VOICEEM", Const, 0}, + {"IFT_VOICEEMFGD", Const, 1}, + {"IFT_VOICEENCAP", Const, 0}, + {"IFT_VOICEFGDEANA", Const, 1}, + {"IFT_VOICEFXO", Const, 0}, + {"IFT_VOICEFXS", Const, 0}, + {"IFT_VOICEOVERATM", Const, 0}, + {"IFT_VOICEOVERCABLE", Const, 1}, + {"IFT_VOICEOVERFRAMERELAY", Const, 0}, + {"IFT_VOICEOVERIP", Const, 0}, + {"IFT_X213", Const, 0}, + {"IFT_X25", Const, 0}, + {"IFT_X25DDN", Const, 0}, + {"IFT_X25HUNTGROUP", Const, 0}, + {"IFT_X25MLP", Const, 0}, + {"IFT_X25PLE", Const, 0}, + {"IFT_XETHER", Const, 0}, + {"IGNBRK", Const, 0}, + {"IGNCR", Const, 0}, + {"IGNORE", Const, 0}, + {"IGNPAR", Const, 0}, + {"IMAXBEL", Const, 0}, + {"INFINITE", Const, 0}, + {"INLCR", Const, 0}, + {"INPCK", Const, 0}, + {"INVALID_FILE_ATTRIBUTES", Const, 0}, + {"IN_ACCESS", Const, 0}, + {"IN_ALL_EVENTS", Const, 0}, + {"IN_ATTRIB", Const, 0}, + {"IN_CLASSA_HOST", Const, 0}, + {"IN_CLASSA_MAX", Const, 0}, + {"IN_CLASSA_NET", Const, 0}, + {"IN_CLASSA_NSHIFT", Const, 0}, + {"IN_CLASSB_HOST", Const, 0}, + {"IN_CLASSB_MAX", Const, 0}, + {"IN_CLASSB_NET", Const, 0}, + {"IN_CLASSB_NSHIFT", Const, 0}, + {"IN_CLASSC_HOST", Const, 0}, + {"IN_CLASSC_NET", Const, 0}, + {"IN_CLASSC_NSHIFT", Const, 0}, + {"IN_CLASSD_HOST", Const, 0}, + {"IN_CLASSD_NET", Const, 0}, + {"IN_CLASSD_NSHIFT", Const, 0}, + {"IN_CLOEXEC", Const, 0}, + {"IN_CLOSE", Const, 0}, + {"IN_CLOSE_NOWRITE", Const, 0}, + {"IN_CLOSE_WRITE", Const, 0}, + {"IN_CREATE", Const, 0}, + {"IN_DELETE", Const, 0}, + {"IN_DELETE_SELF", Const, 0}, + {"IN_DONT_FOLLOW", Const, 0}, + {"IN_EXCL_UNLINK", Const, 0}, + {"IN_IGNORED", Const, 0}, + {"IN_ISDIR", Const, 0}, + {"IN_LINKLOCALNETNUM", Const, 0}, + {"IN_LOOPBACKNET", Const, 0}, + {"IN_MASK_ADD", Const, 0}, + {"IN_MODIFY", Const, 0}, + {"IN_MOVE", Const, 0}, + {"IN_MOVED_FROM", Const, 0}, + {"IN_MOVED_TO", Const, 0}, + {"IN_MOVE_SELF", Const, 0}, + {"IN_NONBLOCK", Const, 0}, + {"IN_ONESHOT", Const, 0}, + {"IN_ONLYDIR", Const, 0}, + {"IN_OPEN", Const, 0}, + {"IN_Q_OVERFLOW", Const, 0}, + {"IN_RFC3021_HOST", Const, 1}, + {"IN_RFC3021_MASK", Const, 1}, + {"IN_RFC3021_NET", Const, 1}, + {"IN_RFC3021_NSHIFT", Const, 1}, + {"IN_UNMOUNT", Const, 0}, + {"IOC_IN", Const, 1}, + {"IOC_INOUT", Const, 1}, + {"IOC_OUT", Const, 1}, + {"IOC_VENDOR", Const, 3}, + {"IOC_WS2", Const, 1}, + {"IO_REPARSE_TAG_SYMLINK", Const, 4}, + {"IPMreq", Type, 0}, + {"IPMreq.Interface", Field, 0}, + {"IPMreq.Multiaddr", Field, 0}, + {"IPMreqn", Type, 0}, + {"IPMreqn.Address", Field, 0}, + {"IPMreqn.Ifindex", Field, 0}, + {"IPMreqn.Multiaddr", Field, 0}, + {"IPPROTO_3PC", Const, 0}, + {"IPPROTO_ADFS", Const, 0}, + 
{"IPPROTO_AH", Const, 0}, + {"IPPROTO_AHIP", Const, 0}, + {"IPPROTO_APES", Const, 0}, + {"IPPROTO_ARGUS", Const, 0}, + {"IPPROTO_AX25", Const, 0}, + {"IPPROTO_BHA", Const, 0}, + {"IPPROTO_BLT", Const, 0}, + {"IPPROTO_BRSATMON", Const, 0}, + {"IPPROTO_CARP", Const, 0}, + {"IPPROTO_CFTP", Const, 0}, + {"IPPROTO_CHAOS", Const, 0}, + {"IPPROTO_CMTP", Const, 0}, + {"IPPROTO_COMP", Const, 0}, + {"IPPROTO_CPHB", Const, 0}, + {"IPPROTO_CPNX", Const, 0}, + {"IPPROTO_DCCP", Const, 0}, + {"IPPROTO_DDP", Const, 0}, + {"IPPROTO_DGP", Const, 0}, + {"IPPROTO_DIVERT", Const, 0}, + {"IPPROTO_DIVERT_INIT", Const, 3}, + {"IPPROTO_DIVERT_RESP", Const, 3}, + {"IPPROTO_DONE", Const, 0}, + {"IPPROTO_DSTOPTS", Const, 0}, + {"IPPROTO_EGP", Const, 0}, + {"IPPROTO_EMCON", Const, 0}, + {"IPPROTO_ENCAP", Const, 0}, + {"IPPROTO_EON", Const, 0}, + {"IPPROTO_ESP", Const, 0}, + {"IPPROTO_ETHERIP", Const, 0}, + {"IPPROTO_FRAGMENT", Const, 0}, + {"IPPROTO_GGP", Const, 0}, + {"IPPROTO_GMTP", Const, 0}, + {"IPPROTO_GRE", Const, 0}, + {"IPPROTO_HELLO", Const, 0}, + {"IPPROTO_HMP", Const, 0}, + {"IPPROTO_HOPOPTS", Const, 0}, + {"IPPROTO_ICMP", Const, 0}, + {"IPPROTO_ICMPV6", Const, 0}, + {"IPPROTO_IDP", Const, 0}, + {"IPPROTO_IDPR", Const, 0}, + {"IPPROTO_IDRP", Const, 0}, + {"IPPROTO_IGMP", Const, 0}, + {"IPPROTO_IGP", Const, 0}, + {"IPPROTO_IGRP", Const, 0}, + {"IPPROTO_IL", Const, 0}, + {"IPPROTO_INLSP", Const, 0}, + {"IPPROTO_INP", Const, 0}, + {"IPPROTO_IP", Const, 0}, + {"IPPROTO_IPCOMP", Const, 0}, + {"IPPROTO_IPCV", Const, 0}, + {"IPPROTO_IPEIP", Const, 0}, + {"IPPROTO_IPIP", Const, 0}, + {"IPPROTO_IPPC", Const, 0}, + {"IPPROTO_IPV4", Const, 0}, + {"IPPROTO_IPV6", Const, 0}, + {"IPPROTO_IPV6_ICMP", Const, 1}, + {"IPPROTO_IRTP", Const, 0}, + {"IPPROTO_KRYPTOLAN", Const, 0}, + {"IPPROTO_LARP", Const, 0}, + {"IPPROTO_LEAF1", Const, 0}, + {"IPPROTO_LEAF2", Const, 0}, + {"IPPROTO_MAX", Const, 0}, + {"IPPROTO_MAXID", Const, 0}, + {"IPPROTO_MEAS", Const, 0}, + {"IPPROTO_MH", Const, 1}, + {"IPPROTO_MHRP", Const, 0}, + {"IPPROTO_MICP", Const, 0}, + {"IPPROTO_MOBILE", Const, 0}, + {"IPPROTO_MPLS", Const, 1}, + {"IPPROTO_MTP", Const, 0}, + {"IPPROTO_MUX", Const, 0}, + {"IPPROTO_ND", Const, 0}, + {"IPPROTO_NHRP", Const, 0}, + {"IPPROTO_NONE", Const, 0}, + {"IPPROTO_NSP", Const, 0}, + {"IPPROTO_NVPII", Const, 0}, + {"IPPROTO_OLD_DIVERT", Const, 0}, + {"IPPROTO_OSPFIGP", Const, 0}, + {"IPPROTO_PFSYNC", Const, 0}, + {"IPPROTO_PGM", Const, 0}, + {"IPPROTO_PIGP", Const, 0}, + {"IPPROTO_PIM", Const, 0}, + {"IPPROTO_PRM", Const, 0}, + {"IPPROTO_PUP", Const, 0}, + {"IPPROTO_PVP", Const, 0}, + {"IPPROTO_RAW", Const, 0}, + {"IPPROTO_RCCMON", Const, 0}, + {"IPPROTO_RDP", Const, 0}, + {"IPPROTO_ROUTING", Const, 0}, + {"IPPROTO_RSVP", Const, 0}, + {"IPPROTO_RVD", Const, 0}, + {"IPPROTO_SATEXPAK", Const, 0}, + {"IPPROTO_SATMON", Const, 0}, + {"IPPROTO_SCCSP", Const, 0}, + {"IPPROTO_SCTP", Const, 0}, + {"IPPROTO_SDRP", Const, 0}, + {"IPPROTO_SEND", Const, 1}, + {"IPPROTO_SEP", Const, 0}, + {"IPPROTO_SKIP", Const, 0}, + {"IPPROTO_SPACER", Const, 0}, + {"IPPROTO_SRPC", Const, 0}, + {"IPPROTO_ST", Const, 0}, + {"IPPROTO_SVMTP", Const, 0}, + {"IPPROTO_SWIPE", Const, 0}, + {"IPPROTO_TCF", Const, 0}, + {"IPPROTO_TCP", Const, 0}, + {"IPPROTO_TLSP", Const, 0}, + {"IPPROTO_TP", Const, 0}, + {"IPPROTO_TPXX", Const, 0}, + {"IPPROTO_TRUNK1", Const, 0}, + {"IPPROTO_TRUNK2", Const, 0}, + {"IPPROTO_TTP", Const, 0}, + {"IPPROTO_UDP", Const, 0}, + {"IPPROTO_UDPLITE", Const, 0}, + {"IPPROTO_VINES", Const, 0}, + {"IPPROTO_VISA", Const, 0}, + {"IPPROTO_VMTP", Const, 
0}, + {"IPPROTO_VRRP", Const, 1}, + {"IPPROTO_WBEXPAK", Const, 0}, + {"IPPROTO_WBMON", Const, 0}, + {"IPPROTO_WSN", Const, 0}, + {"IPPROTO_XNET", Const, 0}, + {"IPPROTO_XTP", Const, 0}, + {"IPV6_2292DSTOPTS", Const, 0}, + {"IPV6_2292HOPLIMIT", Const, 0}, + {"IPV6_2292HOPOPTS", Const, 0}, + {"IPV6_2292NEXTHOP", Const, 0}, + {"IPV6_2292PKTINFO", Const, 0}, + {"IPV6_2292PKTOPTIONS", Const, 0}, + {"IPV6_2292RTHDR", Const, 0}, + {"IPV6_ADDRFORM", Const, 0}, + {"IPV6_ADD_MEMBERSHIP", Const, 0}, + {"IPV6_AUTHHDR", Const, 0}, + {"IPV6_AUTH_LEVEL", Const, 1}, + {"IPV6_AUTOFLOWLABEL", Const, 0}, + {"IPV6_BINDANY", Const, 0}, + {"IPV6_BINDV6ONLY", Const, 0}, + {"IPV6_BOUND_IF", Const, 0}, + {"IPV6_CHECKSUM", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_HOPS", Const, 0}, + {"IPV6_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IPV6_DEFHLIM", Const, 0}, + {"IPV6_DONTFRAG", Const, 0}, + {"IPV6_DROP_MEMBERSHIP", Const, 0}, + {"IPV6_DSTOPTS", Const, 0}, + {"IPV6_ESP_NETWORK_LEVEL", Const, 1}, + {"IPV6_ESP_TRANS_LEVEL", Const, 1}, + {"IPV6_FAITH", Const, 0}, + {"IPV6_FLOWINFO_MASK", Const, 0}, + {"IPV6_FLOWLABEL_MASK", Const, 0}, + {"IPV6_FRAGTTL", Const, 0}, + {"IPV6_FW_ADD", Const, 0}, + {"IPV6_FW_DEL", Const, 0}, + {"IPV6_FW_FLUSH", Const, 0}, + {"IPV6_FW_GET", Const, 0}, + {"IPV6_FW_ZERO", Const, 0}, + {"IPV6_HLIMDEC", Const, 0}, + {"IPV6_HOPLIMIT", Const, 0}, + {"IPV6_HOPOPTS", Const, 0}, + {"IPV6_IPCOMP_LEVEL", Const, 1}, + {"IPV6_IPSEC_POLICY", Const, 0}, + {"IPV6_JOIN_ANYCAST", Const, 0}, + {"IPV6_JOIN_GROUP", Const, 0}, + {"IPV6_LEAVE_ANYCAST", Const, 0}, + {"IPV6_LEAVE_GROUP", Const, 0}, + {"IPV6_MAXHLIM", Const, 0}, + {"IPV6_MAXOPTHDR", Const, 0}, + {"IPV6_MAXPACKET", Const, 0}, + {"IPV6_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IPV6_MAX_MEMBERSHIPS", Const, 0}, + {"IPV6_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IPV6_MIN_MEMBERSHIPS", Const, 0}, + {"IPV6_MMTU", Const, 0}, + {"IPV6_MSFILTER", Const, 0}, + {"IPV6_MTU", Const, 0}, + {"IPV6_MTU_DISCOVER", Const, 0}, + {"IPV6_MULTICAST_HOPS", Const, 0}, + {"IPV6_MULTICAST_IF", Const, 0}, + {"IPV6_MULTICAST_LOOP", Const, 0}, + {"IPV6_NEXTHOP", Const, 0}, + {"IPV6_OPTIONS", Const, 1}, + {"IPV6_PATHMTU", Const, 0}, + {"IPV6_PIPEX", Const, 1}, + {"IPV6_PKTINFO", Const, 0}, + {"IPV6_PMTUDISC_DO", Const, 0}, + {"IPV6_PMTUDISC_DONT", Const, 0}, + {"IPV6_PMTUDISC_PROBE", Const, 0}, + {"IPV6_PMTUDISC_WANT", Const, 0}, + {"IPV6_PORTRANGE", Const, 0}, + {"IPV6_PORTRANGE_DEFAULT", Const, 0}, + {"IPV6_PORTRANGE_HIGH", Const, 0}, + {"IPV6_PORTRANGE_LOW", Const, 0}, + {"IPV6_PREFER_TEMPADDR", Const, 0}, + {"IPV6_RECVDSTOPTS", Const, 0}, + {"IPV6_RECVDSTPORT", Const, 3}, + {"IPV6_RECVERR", Const, 0}, + {"IPV6_RECVHOPLIMIT", Const, 0}, + {"IPV6_RECVHOPOPTS", Const, 0}, + {"IPV6_RECVPATHMTU", Const, 0}, + {"IPV6_RECVPKTINFO", Const, 0}, + {"IPV6_RECVRTHDR", Const, 0}, + {"IPV6_RECVTCLASS", Const, 0}, + {"IPV6_ROUTER_ALERT", Const, 0}, + {"IPV6_RTABLE", Const, 1}, + {"IPV6_RTHDR", Const, 0}, + {"IPV6_RTHDRDSTOPTS", Const, 0}, + {"IPV6_RTHDR_LOOSE", Const, 0}, + {"IPV6_RTHDR_STRICT", Const, 0}, + {"IPV6_RTHDR_TYPE_0", Const, 0}, + {"IPV6_RXDSTOPTS", Const, 0}, + {"IPV6_RXHOPOPTS", Const, 0}, + {"IPV6_SOCKOPT_RESERVED1", Const, 0}, + {"IPV6_TCLASS", Const, 0}, + {"IPV6_UNICAST_HOPS", Const, 0}, + {"IPV6_USE_MIN_MTU", Const, 0}, + {"IPV6_V6ONLY", Const, 0}, + {"IPV6_VERSION", Const, 0}, + {"IPV6_VERSION_MASK", Const, 0}, + {"IPV6_XFRM_POLICY", Const, 0}, + {"IP_ADD_MEMBERSHIP", Const, 0}, + {"IP_ADD_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_AUTH_LEVEL", Const, 1}, + {"IP_BINDANY", Const, 0}, + 
{"IP_BLOCK_SOURCE", Const, 0}, + {"IP_BOUND_IF", Const, 0}, + {"IP_DEFAULT_MULTICAST_LOOP", Const, 0}, + {"IP_DEFAULT_MULTICAST_TTL", Const, 0}, + {"IP_DF", Const, 0}, + {"IP_DIVERTFL", Const, 3}, + {"IP_DONTFRAG", Const, 0}, + {"IP_DROP_MEMBERSHIP", Const, 0}, + {"IP_DROP_SOURCE_MEMBERSHIP", Const, 0}, + {"IP_DUMMYNET3", Const, 0}, + {"IP_DUMMYNET_CONFIGURE", Const, 0}, + {"IP_DUMMYNET_DEL", Const, 0}, + {"IP_DUMMYNET_FLUSH", Const, 0}, + {"IP_DUMMYNET_GET", Const, 0}, + {"IP_EF", Const, 1}, + {"IP_ERRORMTU", Const, 1}, + {"IP_ESP_NETWORK_LEVEL", Const, 1}, + {"IP_ESP_TRANS_LEVEL", Const, 1}, + {"IP_FAITH", Const, 0}, + {"IP_FREEBIND", Const, 0}, + {"IP_FW3", Const, 0}, + {"IP_FW_ADD", Const, 0}, + {"IP_FW_DEL", Const, 0}, + {"IP_FW_FLUSH", Const, 0}, + {"IP_FW_GET", Const, 0}, + {"IP_FW_NAT_CFG", Const, 0}, + {"IP_FW_NAT_DEL", Const, 0}, + {"IP_FW_NAT_GET_CONFIG", Const, 0}, + {"IP_FW_NAT_GET_LOG", Const, 0}, + {"IP_FW_RESETLOG", Const, 0}, + {"IP_FW_TABLE_ADD", Const, 0}, + {"IP_FW_TABLE_DEL", Const, 0}, + {"IP_FW_TABLE_FLUSH", Const, 0}, + {"IP_FW_TABLE_GETSIZE", Const, 0}, + {"IP_FW_TABLE_LIST", Const, 0}, + {"IP_FW_ZERO", Const, 0}, + {"IP_HDRINCL", Const, 0}, + {"IP_IPCOMP_LEVEL", Const, 1}, + {"IP_IPSECFLOWINFO", Const, 1}, + {"IP_IPSEC_LOCAL_AUTH", Const, 1}, + {"IP_IPSEC_LOCAL_CRED", Const, 1}, + {"IP_IPSEC_LOCAL_ID", Const, 1}, + {"IP_IPSEC_POLICY", Const, 0}, + {"IP_IPSEC_REMOTE_AUTH", Const, 1}, + {"IP_IPSEC_REMOTE_CRED", Const, 1}, + {"IP_IPSEC_REMOTE_ID", Const, 1}, + {"IP_MAXPACKET", Const, 0}, + {"IP_MAX_GROUP_SRC_FILTER", Const, 0}, + {"IP_MAX_MEMBERSHIPS", Const, 0}, + {"IP_MAX_SOCK_MUTE_FILTER", Const, 0}, + {"IP_MAX_SOCK_SRC_FILTER", Const, 0}, + {"IP_MAX_SOURCE_FILTER", Const, 0}, + {"IP_MF", Const, 0}, + {"IP_MINFRAGSIZE", Const, 1}, + {"IP_MINTTL", Const, 0}, + {"IP_MIN_MEMBERSHIPS", Const, 0}, + {"IP_MSFILTER", Const, 0}, + {"IP_MSS", Const, 0}, + {"IP_MTU", Const, 0}, + {"IP_MTU_DISCOVER", Const, 0}, + {"IP_MULTICAST_IF", Const, 0}, + {"IP_MULTICAST_IFINDEX", Const, 0}, + {"IP_MULTICAST_LOOP", Const, 0}, + {"IP_MULTICAST_TTL", Const, 0}, + {"IP_MULTICAST_VIF", Const, 0}, + {"IP_NAT__XXX", Const, 0}, + {"IP_OFFMASK", Const, 0}, + {"IP_OLD_FW_ADD", Const, 0}, + {"IP_OLD_FW_DEL", Const, 0}, + {"IP_OLD_FW_FLUSH", Const, 0}, + {"IP_OLD_FW_GET", Const, 0}, + {"IP_OLD_FW_RESETLOG", Const, 0}, + {"IP_OLD_FW_ZERO", Const, 0}, + {"IP_ONESBCAST", Const, 0}, + {"IP_OPTIONS", Const, 0}, + {"IP_ORIGDSTADDR", Const, 0}, + {"IP_PASSSEC", Const, 0}, + {"IP_PIPEX", Const, 1}, + {"IP_PKTINFO", Const, 0}, + {"IP_PKTOPTIONS", Const, 0}, + {"IP_PMTUDISC", Const, 0}, + {"IP_PMTUDISC_DO", Const, 0}, + {"IP_PMTUDISC_DONT", Const, 0}, + {"IP_PMTUDISC_PROBE", Const, 0}, + {"IP_PMTUDISC_WANT", Const, 0}, + {"IP_PORTRANGE", Const, 0}, + {"IP_PORTRANGE_DEFAULT", Const, 0}, + {"IP_PORTRANGE_HIGH", Const, 0}, + {"IP_PORTRANGE_LOW", Const, 0}, + {"IP_RECVDSTADDR", Const, 0}, + {"IP_RECVDSTPORT", Const, 1}, + {"IP_RECVERR", Const, 0}, + {"IP_RECVIF", Const, 0}, + {"IP_RECVOPTS", Const, 0}, + {"IP_RECVORIGDSTADDR", Const, 0}, + {"IP_RECVPKTINFO", Const, 0}, + {"IP_RECVRETOPTS", Const, 0}, + {"IP_RECVRTABLE", Const, 1}, + {"IP_RECVTOS", Const, 0}, + {"IP_RECVTTL", Const, 0}, + {"IP_RETOPTS", Const, 0}, + {"IP_RF", Const, 0}, + {"IP_ROUTER_ALERT", Const, 0}, + {"IP_RSVP_OFF", Const, 0}, + {"IP_RSVP_ON", Const, 0}, + {"IP_RSVP_VIF_OFF", Const, 0}, + {"IP_RSVP_VIF_ON", Const, 0}, + {"IP_RTABLE", Const, 1}, + {"IP_SENDSRCADDR", Const, 0}, + {"IP_STRIPHDR", Const, 0}, + {"IP_TOS", Const, 0}, + 
{"IP_TRAFFIC_MGT_BACKGROUND", Const, 0}, + {"IP_TRANSPARENT", Const, 0}, + {"IP_TTL", Const, 0}, + {"IP_UNBLOCK_SOURCE", Const, 0}, + {"IP_XFRM_POLICY", Const, 0}, + {"IPv6MTUInfo", Type, 2}, + {"IPv6MTUInfo.Addr", Field, 2}, + {"IPv6MTUInfo.Mtu", Field, 2}, + {"IPv6Mreq", Type, 0}, + {"IPv6Mreq.Interface", Field, 0}, + {"IPv6Mreq.Multiaddr", Field, 0}, + {"ISIG", Const, 0}, + {"ISTRIP", Const, 0}, + {"IUCLC", Const, 0}, + {"IUTF8", Const, 0}, + {"IXANY", Const, 0}, + {"IXOFF", Const, 0}, + {"IXON", Const, 0}, + {"IfAddrmsg", Type, 0}, + {"IfAddrmsg.Family", Field, 0}, + {"IfAddrmsg.Flags", Field, 0}, + {"IfAddrmsg.Index", Field, 0}, + {"IfAddrmsg.Prefixlen", Field, 0}, + {"IfAddrmsg.Scope", Field, 0}, + {"IfAnnounceMsghdr", Type, 1}, + {"IfAnnounceMsghdr.Hdrlen", Field, 2}, + {"IfAnnounceMsghdr.Index", Field, 1}, + {"IfAnnounceMsghdr.Msglen", Field, 1}, + {"IfAnnounceMsghdr.Name", Field, 1}, + {"IfAnnounceMsghdr.Type", Field, 1}, + {"IfAnnounceMsghdr.Version", Field, 1}, + {"IfAnnounceMsghdr.What", Field, 1}, + {"IfData", Type, 0}, + {"IfData.Addrlen", Field, 0}, + {"IfData.Baudrate", Field, 0}, + {"IfData.Capabilities", Field, 2}, + {"IfData.Collisions", Field, 0}, + {"IfData.Datalen", Field, 0}, + {"IfData.Epoch", Field, 0}, + {"IfData.Hdrlen", Field, 0}, + {"IfData.Hwassist", Field, 0}, + {"IfData.Ibytes", Field, 0}, + {"IfData.Ierrors", Field, 0}, + {"IfData.Imcasts", Field, 0}, + {"IfData.Ipackets", Field, 0}, + {"IfData.Iqdrops", Field, 0}, + {"IfData.Lastchange", Field, 0}, + {"IfData.Link_state", Field, 0}, + {"IfData.Mclpool", Field, 2}, + {"IfData.Metric", Field, 0}, + {"IfData.Mtu", Field, 0}, + {"IfData.Noproto", Field, 0}, + {"IfData.Obytes", Field, 0}, + {"IfData.Oerrors", Field, 0}, + {"IfData.Omcasts", Field, 0}, + {"IfData.Opackets", Field, 0}, + {"IfData.Pad", Field, 2}, + {"IfData.Pad_cgo_0", Field, 2}, + {"IfData.Pad_cgo_1", Field, 2}, + {"IfData.Physical", Field, 0}, + {"IfData.Recvquota", Field, 0}, + {"IfData.Recvtiming", Field, 0}, + {"IfData.Reserved1", Field, 0}, + {"IfData.Reserved2", Field, 0}, + {"IfData.Spare_char1", Field, 0}, + {"IfData.Spare_char2", Field, 0}, + {"IfData.Type", Field, 0}, + {"IfData.Typelen", Field, 0}, + {"IfData.Unused1", Field, 0}, + {"IfData.Unused2", Field, 0}, + {"IfData.Xmitquota", Field, 0}, + {"IfData.Xmittiming", Field, 0}, + {"IfInfomsg", Type, 0}, + {"IfInfomsg.Change", Field, 0}, + {"IfInfomsg.Family", Field, 0}, + {"IfInfomsg.Flags", Field, 0}, + {"IfInfomsg.Index", Field, 0}, + {"IfInfomsg.Type", Field, 0}, + {"IfInfomsg.X__ifi_pad", Field, 0}, + {"IfMsghdr", Type, 0}, + {"IfMsghdr.Addrs", Field, 0}, + {"IfMsghdr.Data", Field, 0}, + {"IfMsghdr.Flags", Field, 0}, + {"IfMsghdr.Hdrlen", Field, 2}, + {"IfMsghdr.Index", Field, 0}, + {"IfMsghdr.Msglen", Field, 0}, + {"IfMsghdr.Pad1", Field, 2}, + {"IfMsghdr.Pad2", Field, 2}, + {"IfMsghdr.Pad_cgo_0", Field, 0}, + {"IfMsghdr.Pad_cgo_1", Field, 2}, + {"IfMsghdr.Tableid", Field, 2}, + {"IfMsghdr.Type", Field, 0}, + {"IfMsghdr.Version", Field, 0}, + {"IfMsghdr.Xflags", Field, 2}, + {"IfaMsghdr", Type, 0}, + {"IfaMsghdr.Addrs", Field, 0}, + {"IfaMsghdr.Flags", Field, 0}, + {"IfaMsghdr.Hdrlen", Field, 2}, + {"IfaMsghdr.Index", Field, 0}, + {"IfaMsghdr.Metric", Field, 0}, + {"IfaMsghdr.Msglen", Field, 0}, + {"IfaMsghdr.Pad1", Field, 2}, + {"IfaMsghdr.Pad2", Field, 2}, + {"IfaMsghdr.Pad_cgo_0", Field, 0}, + {"IfaMsghdr.Tableid", Field, 2}, + {"IfaMsghdr.Type", Field, 0}, + {"IfaMsghdr.Version", Field, 0}, + {"IfmaMsghdr", Type, 0}, + {"IfmaMsghdr.Addrs", Field, 0}, + {"IfmaMsghdr.Flags", 
Field, 0}, + {"IfmaMsghdr.Index", Field, 0}, + {"IfmaMsghdr.Msglen", Field, 0}, + {"IfmaMsghdr.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr.Type", Field, 0}, + {"IfmaMsghdr.Version", Field, 0}, + {"IfmaMsghdr2", Type, 0}, + {"IfmaMsghdr2.Addrs", Field, 0}, + {"IfmaMsghdr2.Flags", Field, 0}, + {"IfmaMsghdr2.Index", Field, 0}, + {"IfmaMsghdr2.Msglen", Field, 0}, + {"IfmaMsghdr2.Pad_cgo_0", Field, 0}, + {"IfmaMsghdr2.Refcount", Field, 0}, + {"IfmaMsghdr2.Type", Field, 0}, + {"IfmaMsghdr2.Version", Field, 0}, + {"ImplementsGetwd", Const, 0}, + {"Inet4Pktinfo", Type, 0}, + {"Inet4Pktinfo.Addr", Field, 0}, + {"Inet4Pktinfo.Ifindex", Field, 0}, + {"Inet4Pktinfo.Spec_dst", Field, 0}, + {"Inet6Pktinfo", Type, 0}, + {"Inet6Pktinfo.Addr", Field, 0}, + {"Inet6Pktinfo.Ifindex", Field, 0}, + {"InotifyAddWatch", Func, 0}, + {"InotifyEvent", Type, 0}, + {"InotifyEvent.Cookie", Field, 0}, + {"InotifyEvent.Len", Field, 0}, + {"InotifyEvent.Mask", Field, 0}, + {"InotifyEvent.Name", Field, 0}, + {"InotifyEvent.Wd", Field, 0}, + {"InotifyInit", Func, 0}, + {"InotifyInit1", Func, 0}, + {"InotifyRmWatch", Func, 0}, + {"InterfaceAddrMessage", Type, 0}, + {"InterfaceAddrMessage.Data", Field, 0}, + {"InterfaceAddrMessage.Header", Field, 0}, + {"InterfaceAnnounceMessage", Type, 1}, + {"InterfaceAnnounceMessage.Header", Field, 1}, + {"InterfaceInfo", Type, 0}, + {"InterfaceInfo.Address", Field, 0}, + {"InterfaceInfo.BroadcastAddress", Field, 0}, + {"InterfaceInfo.Flags", Field, 0}, + {"InterfaceInfo.Netmask", Field, 0}, + {"InterfaceMessage", Type, 0}, + {"InterfaceMessage.Data", Field, 0}, + {"InterfaceMessage.Header", Field, 0}, + {"InterfaceMulticastAddrMessage", Type, 0}, + {"InterfaceMulticastAddrMessage.Data", Field, 0}, + {"InterfaceMulticastAddrMessage.Header", Field, 0}, + {"InvalidHandle", Const, 0}, + {"Ioperm", Func, 0}, + {"Iopl", Func, 0}, + {"Iovec", Type, 0}, + {"Iovec.Base", Field, 0}, + {"Iovec.Len", Field, 0}, + {"IpAdapterInfo", Type, 0}, + {"IpAdapterInfo.AdapterName", Field, 0}, + {"IpAdapterInfo.Address", Field, 0}, + {"IpAdapterInfo.AddressLength", Field, 0}, + {"IpAdapterInfo.ComboIndex", Field, 0}, + {"IpAdapterInfo.CurrentIpAddress", Field, 0}, + {"IpAdapterInfo.Description", Field, 0}, + {"IpAdapterInfo.DhcpEnabled", Field, 0}, + {"IpAdapterInfo.DhcpServer", Field, 0}, + {"IpAdapterInfo.GatewayList", Field, 0}, + {"IpAdapterInfo.HaveWins", Field, 0}, + {"IpAdapterInfo.Index", Field, 0}, + {"IpAdapterInfo.IpAddressList", Field, 0}, + {"IpAdapterInfo.LeaseExpires", Field, 0}, + {"IpAdapterInfo.LeaseObtained", Field, 0}, + {"IpAdapterInfo.Next", Field, 0}, + {"IpAdapterInfo.PrimaryWinsServer", Field, 0}, + {"IpAdapterInfo.SecondaryWinsServer", Field, 0}, + {"IpAdapterInfo.Type", Field, 0}, + {"IpAddrString", Type, 0}, + {"IpAddrString.Context", Field, 0}, + {"IpAddrString.IpAddress", Field, 0}, + {"IpAddrString.IpMask", Field, 0}, + {"IpAddrString.Next", Field, 0}, + {"IpAddressString", Type, 0}, + {"IpAddressString.String", Field, 0}, + {"IpMaskString", Type, 0}, + {"IpMaskString.String", Field, 2}, + {"Issetugid", Func, 0}, + {"KEY_ALL_ACCESS", Const, 0}, + {"KEY_CREATE_LINK", Const, 0}, + {"KEY_CREATE_SUB_KEY", Const, 0}, + {"KEY_ENUMERATE_SUB_KEYS", Const, 0}, + {"KEY_EXECUTE", Const, 0}, + {"KEY_NOTIFY", Const, 0}, + {"KEY_QUERY_VALUE", Const, 0}, + {"KEY_READ", Const, 0}, + {"KEY_SET_VALUE", Const, 0}, + {"KEY_WOW64_32KEY", Const, 0}, + {"KEY_WOW64_64KEY", Const, 0}, + {"KEY_WRITE", Const, 0}, + {"Kevent", Func, 0}, + {"Kevent_t", Type, 0}, + {"Kevent_t.Data", Field, 0}, + {"Kevent_t.Fflags", 
Field, 0}, + {"Kevent_t.Filter", Field, 0}, + {"Kevent_t.Flags", Field, 0}, + {"Kevent_t.Ident", Field, 0}, + {"Kevent_t.Pad_cgo_0", Field, 2}, + {"Kevent_t.Udata", Field, 0}, + {"Kill", Func, 0}, + {"Klogctl", Func, 0}, + {"Kqueue", Func, 0}, + {"LANG_ENGLISH", Const, 0}, + {"LAYERED_PROTOCOL", Const, 2}, + {"LCNT_OVERLOAD_FLUSH", Const, 1}, + {"LINUX_REBOOT_CMD_CAD_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_CAD_ON", Const, 0}, + {"LINUX_REBOOT_CMD_HALT", Const, 0}, + {"LINUX_REBOOT_CMD_KEXEC", Const, 0}, + {"LINUX_REBOOT_CMD_POWER_OFF", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART", Const, 0}, + {"LINUX_REBOOT_CMD_RESTART2", Const, 0}, + {"LINUX_REBOOT_CMD_SW_SUSPEND", Const, 0}, + {"LINUX_REBOOT_MAGIC1", Const, 0}, + {"LINUX_REBOOT_MAGIC2", Const, 0}, + {"LOCK_EX", Const, 0}, + {"LOCK_NB", Const, 0}, + {"LOCK_SH", Const, 0}, + {"LOCK_UN", Const, 0}, + {"LazyDLL", Type, 0}, + {"LazyDLL.Name", Field, 0}, + {"LazyProc", Type, 0}, + {"LazyProc.Name", Field, 0}, + {"Lchown", Func, 0}, + {"Linger", Type, 0}, + {"Linger.Linger", Field, 0}, + {"Linger.Onoff", Field, 0}, + {"Link", Func, 0}, + {"Listen", Func, 0}, + {"Listxattr", Func, 1}, + {"LoadCancelIoEx", Func, 1}, + {"LoadConnectEx", Func, 1}, + {"LoadCreateSymbolicLink", Func, 4}, + {"LoadDLL", Func, 0}, + {"LoadGetAddrInfo", Func, 1}, + {"LoadLibrary", Func, 0}, + {"LoadSetFileCompletionNotificationModes", Func, 2}, + {"LocalFree", Func, 0}, + {"Log2phys_t", Type, 0}, + {"Log2phys_t.Contigbytes", Field, 0}, + {"Log2phys_t.Devoffset", Field, 0}, + {"Log2phys_t.Flags", Field, 0}, + {"LookupAccountName", Func, 0}, + {"LookupAccountSid", Func, 0}, + {"LookupSID", Func, 0}, + {"LsfJump", Func, 0}, + {"LsfSocket", Func, 0}, + {"LsfStmt", Func, 0}, + {"Lstat", Func, 0}, + {"MADV_AUTOSYNC", Const, 1}, + {"MADV_CAN_REUSE", Const, 0}, + {"MADV_CORE", Const, 1}, + {"MADV_DOFORK", Const, 0}, + {"MADV_DONTFORK", Const, 0}, + {"MADV_DONTNEED", Const, 0}, + {"MADV_FREE", Const, 0}, + {"MADV_FREE_REUSABLE", Const, 0}, + {"MADV_FREE_REUSE", Const, 0}, + {"MADV_HUGEPAGE", Const, 0}, + {"MADV_HWPOISON", Const, 0}, + {"MADV_MERGEABLE", Const, 0}, + {"MADV_NOCORE", Const, 1}, + {"MADV_NOHUGEPAGE", Const, 0}, + {"MADV_NORMAL", Const, 0}, + {"MADV_NOSYNC", Const, 1}, + {"MADV_PROTECT", Const, 1}, + {"MADV_RANDOM", Const, 0}, + {"MADV_REMOVE", Const, 0}, + {"MADV_SEQUENTIAL", Const, 0}, + {"MADV_SPACEAVAIL", Const, 3}, + {"MADV_UNMERGEABLE", Const, 0}, + {"MADV_WILLNEED", Const, 0}, + {"MADV_ZERO_WIRED_PAGES", Const, 0}, + {"MAP_32BIT", Const, 0}, + {"MAP_ALIGNED_SUPER", Const, 3}, + {"MAP_ALIGNMENT_16MB", Const, 3}, + {"MAP_ALIGNMENT_1TB", Const, 3}, + {"MAP_ALIGNMENT_256TB", Const, 3}, + {"MAP_ALIGNMENT_4GB", Const, 3}, + {"MAP_ALIGNMENT_64KB", Const, 3}, + {"MAP_ALIGNMENT_64PB", Const, 3}, + {"MAP_ALIGNMENT_MASK", Const, 3}, + {"MAP_ALIGNMENT_SHIFT", Const, 3}, + {"MAP_ANON", Const, 0}, + {"MAP_ANONYMOUS", Const, 0}, + {"MAP_COPY", Const, 0}, + {"MAP_DENYWRITE", Const, 0}, + {"MAP_EXECUTABLE", Const, 0}, + {"MAP_FILE", Const, 0}, + {"MAP_FIXED", Const, 0}, + {"MAP_FLAGMASK", Const, 3}, + {"MAP_GROWSDOWN", Const, 0}, + {"MAP_HASSEMAPHORE", Const, 0}, + {"MAP_HUGETLB", Const, 0}, + {"MAP_INHERIT", Const, 3}, + {"MAP_INHERIT_COPY", Const, 3}, + {"MAP_INHERIT_DEFAULT", Const, 3}, + {"MAP_INHERIT_DONATE_COPY", Const, 3}, + {"MAP_INHERIT_NONE", Const, 3}, + {"MAP_INHERIT_SHARE", Const, 3}, + {"MAP_JIT", Const, 0}, + {"MAP_LOCKED", Const, 0}, + {"MAP_NOCACHE", Const, 0}, + {"MAP_NOCORE", Const, 1}, + {"MAP_NOEXTEND", Const, 0}, + {"MAP_NONBLOCK", Const, 0}, + 
{"MAP_NORESERVE", Const, 0}, + {"MAP_NOSYNC", Const, 1}, + {"MAP_POPULATE", Const, 0}, + {"MAP_PREFAULT_READ", Const, 1}, + {"MAP_PRIVATE", Const, 0}, + {"MAP_RENAME", Const, 0}, + {"MAP_RESERVED0080", Const, 0}, + {"MAP_RESERVED0100", Const, 1}, + {"MAP_SHARED", Const, 0}, + {"MAP_STACK", Const, 0}, + {"MAP_TRYFIXED", Const, 3}, + {"MAP_TYPE", Const, 0}, + {"MAP_WIRED", Const, 3}, + {"MAXIMUM_REPARSE_DATA_BUFFER_SIZE", Const, 4}, + {"MAXLEN_IFDESCR", Const, 0}, + {"MAXLEN_PHYSADDR", Const, 0}, + {"MAX_ADAPTER_ADDRESS_LENGTH", Const, 0}, + {"MAX_ADAPTER_DESCRIPTION_LENGTH", Const, 0}, + {"MAX_ADAPTER_NAME_LENGTH", Const, 0}, + {"MAX_COMPUTERNAME_LENGTH", Const, 0}, + {"MAX_INTERFACE_NAME_LEN", Const, 0}, + {"MAX_LONG_PATH", Const, 0}, + {"MAX_PATH", Const, 0}, + {"MAX_PROTOCOL_CHAIN", Const, 2}, + {"MCL_CURRENT", Const, 0}, + {"MCL_FUTURE", Const, 0}, + {"MNT_DETACH", Const, 0}, + {"MNT_EXPIRE", Const, 0}, + {"MNT_FORCE", Const, 0}, + {"MSG_BCAST", Const, 1}, + {"MSG_CMSG_CLOEXEC", Const, 0}, + {"MSG_COMPAT", Const, 0}, + {"MSG_CONFIRM", Const, 0}, + {"MSG_CONTROLMBUF", Const, 1}, + {"MSG_CTRUNC", Const, 0}, + {"MSG_DONTROUTE", Const, 0}, + {"MSG_DONTWAIT", Const, 0}, + {"MSG_EOF", Const, 0}, + {"MSG_EOR", Const, 0}, + {"MSG_ERRQUEUE", Const, 0}, + {"MSG_FASTOPEN", Const, 1}, + {"MSG_FIN", Const, 0}, + {"MSG_FLUSH", Const, 0}, + {"MSG_HAVEMORE", Const, 0}, + {"MSG_HOLD", Const, 0}, + {"MSG_IOVUSRSPACE", Const, 1}, + {"MSG_LENUSRSPACE", Const, 1}, + {"MSG_MCAST", Const, 1}, + {"MSG_MORE", Const, 0}, + {"MSG_NAMEMBUF", Const, 1}, + {"MSG_NBIO", Const, 0}, + {"MSG_NEEDSA", Const, 0}, + {"MSG_NOSIGNAL", Const, 0}, + {"MSG_NOTIFICATION", Const, 0}, + {"MSG_OOB", Const, 0}, + {"MSG_PEEK", Const, 0}, + {"MSG_PROXY", Const, 0}, + {"MSG_RCVMORE", Const, 0}, + {"MSG_RST", Const, 0}, + {"MSG_SEND", Const, 0}, + {"MSG_SYN", Const, 0}, + {"MSG_TRUNC", Const, 0}, + {"MSG_TRYHARD", Const, 0}, + {"MSG_USERFLAGS", Const, 1}, + {"MSG_WAITALL", Const, 0}, + {"MSG_WAITFORONE", Const, 0}, + {"MSG_WAITSTREAM", Const, 0}, + {"MS_ACTIVE", Const, 0}, + {"MS_ASYNC", Const, 0}, + {"MS_BIND", Const, 0}, + {"MS_DEACTIVATE", Const, 0}, + {"MS_DIRSYNC", Const, 0}, + {"MS_INVALIDATE", Const, 0}, + {"MS_I_VERSION", Const, 0}, + {"MS_KERNMOUNT", Const, 0}, + {"MS_KILLPAGES", Const, 0}, + {"MS_MANDLOCK", Const, 0}, + {"MS_MGC_MSK", Const, 0}, + {"MS_MGC_VAL", Const, 0}, + {"MS_MOVE", Const, 0}, + {"MS_NOATIME", Const, 0}, + {"MS_NODEV", Const, 0}, + {"MS_NODIRATIME", Const, 0}, + {"MS_NOEXEC", Const, 0}, + {"MS_NOSUID", Const, 0}, + {"MS_NOUSER", Const, 0}, + {"MS_POSIXACL", Const, 0}, + {"MS_PRIVATE", Const, 0}, + {"MS_RDONLY", Const, 0}, + {"MS_REC", Const, 0}, + {"MS_RELATIME", Const, 0}, + {"MS_REMOUNT", Const, 0}, + {"MS_RMT_MASK", Const, 0}, + {"MS_SHARED", Const, 0}, + {"MS_SILENT", Const, 0}, + {"MS_SLAVE", Const, 0}, + {"MS_STRICTATIME", Const, 0}, + {"MS_SYNC", Const, 0}, + {"MS_SYNCHRONOUS", Const, 0}, + {"MS_UNBINDABLE", Const, 0}, + {"Madvise", Func, 0}, + {"MapViewOfFile", Func, 0}, + {"MaxTokenInfoClass", Const, 0}, + {"Mclpool", Type, 2}, + {"Mclpool.Alive", Field, 2}, + {"Mclpool.Cwm", Field, 2}, + {"Mclpool.Grown", Field, 2}, + {"Mclpool.Hwm", Field, 2}, + {"Mclpool.Lwm", Field, 2}, + {"MibIfRow", Type, 0}, + {"MibIfRow.AdminStatus", Field, 0}, + {"MibIfRow.Descr", Field, 0}, + {"MibIfRow.DescrLen", Field, 0}, + {"MibIfRow.InDiscards", Field, 0}, + {"MibIfRow.InErrors", Field, 0}, + {"MibIfRow.InNUcastPkts", Field, 0}, + {"MibIfRow.InOctets", Field, 0}, + {"MibIfRow.InUcastPkts", Field, 0}, + 
{"MibIfRow.InUnknownProtos", Field, 0}, + {"MibIfRow.Index", Field, 0}, + {"MibIfRow.LastChange", Field, 0}, + {"MibIfRow.Mtu", Field, 0}, + {"MibIfRow.Name", Field, 0}, + {"MibIfRow.OperStatus", Field, 0}, + {"MibIfRow.OutDiscards", Field, 0}, + {"MibIfRow.OutErrors", Field, 0}, + {"MibIfRow.OutNUcastPkts", Field, 0}, + {"MibIfRow.OutOctets", Field, 0}, + {"MibIfRow.OutQLen", Field, 0}, + {"MibIfRow.OutUcastPkts", Field, 0}, + {"MibIfRow.PhysAddr", Field, 0}, + {"MibIfRow.PhysAddrLen", Field, 0}, + {"MibIfRow.Speed", Field, 0}, + {"MibIfRow.Type", Field, 0}, + {"Mkdir", Func, 0}, + {"Mkdirat", Func, 0}, + {"Mkfifo", Func, 0}, + {"Mknod", Func, 0}, + {"Mknodat", Func, 0}, + {"Mlock", Func, 0}, + {"Mlockall", Func, 0}, + {"Mmap", Func, 0}, + {"Mount", Func, 0}, + {"MoveFile", Func, 0}, + {"Mprotect", Func, 0}, + {"Msghdr", Type, 0}, + {"Msghdr.Control", Field, 0}, + {"Msghdr.Controllen", Field, 0}, + {"Msghdr.Flags", Field, 0}, + {"Msghdr.Iov", Field, 0}, + {"Msghdr.Iovlen", Field, 0}, + {"Msghdr.Name", Field, 0}, + {"Msghdr.Namelen", Field, 0}, + {"Msghdr.Pad_cgo_0", Field, 0}, + {"Msghdr.Pad_cgo_1", Field, 0}, + {"Munlock", Func, 0}, + {"Munlockall", Func, 0}, + {"Munmap", Func, 0}, + {"MustLoadDLL", Func, 0}, + {"NAME_MAX", Const, 0}, + {"NETLINK_ADD_MEMBERSHIP", Const, 0}, + {"NETLINK_AUDIT", Const, 0}, + {"NETLINK_BROADCAST_ERROR", Const, 0}, + {"NETLINK_CONNECTOR", Const, 0}, + {"NETLINK_DNRTMSG", Const, 0}, + {"NETLINK_DROP_MEMBERSHIP", Const, 0}, + {"NETLINK_ECRYPTFS", Const, 0}, + {"NETLINK_FIB_LOOKUP", Const, 0}, + {"NETLINK_FIREWALL", Const, 0}, + {"NETLINK_GENERIC", Const, 0}, + {"NETLINK_INET_DIAG", Const, 0}, + {"NETLINK_IP6_FW", Const, 0}, + {"NETLINK_ISCSI", Const, 0}, + {"NETLINK_KOBJECT_UEVENT", Const, 0}, + {"NETLINK_NETFILTER", Const, 0}, + {"NETLINK_NFLOG", Const, 0}, + {"NETLINK_NO_ENOBUFS", Const, 0}, + {"NETLINK_PKTINFO", Const, 0}, + {"NETLINK_RDMA", Const, 0}, + {"NETLINK_ROUTE", Const, 0}, + {"NETLINK_SCSITRANSPORT", Const, 0}, + {"NETLINK_SELINUX", Const, 0}, + {"NETLINK_UNUSED", Const, 0}, + {"NETLINK_USERSOCK", Const, 0}, + {"NETLINK_XFRM", Const, 0}, + {"NET_RT_DUMP", Const, 0}, + {"NET_RT_DUMP2", Const, 0}, + {"NET_RT_FLAGS", Const, 0}, + {"NET_RT_IFLIST", Const, 0}, + {"NET_RT_IFLIST2", Const, 0}, + {"NET_RT_IFLISTL", Const, 1}, + {"NET_RT_IFMALIST", Const, 0}, + {"NET_RT_MAXID", Const, 0}, + {"NET_RT_OIFLIST", Const, 1}, + {"NET_RT_OOIFLIST", Const, 1}, + {"NET_RT_STAT", Const, 0}, + {"NET_RT_STATS", Const, 1}, + {"NET_RT_TABLE", Const, 1}, + {"NET_RT_TRASH", Const, 0}, + {"NLA_ALIGNTO", Const, 0}, + {"NLA_F_NESTED", Const, 0}, + {"NLA_F_NET_BYTEORDER", Const, 0}, + {"NLA_HDRLEN", Const, 0}, + {"NLMSG_ALIGNTO", Const, 0}, + {"NLMSG_DONE", Const, 0}, + {"NLMSG_ERROR", Const, 0}, + {"NLMSG_HDRLEN", Const, 0}, + {"NLMSG_MIN_TYPE", Const, 0}, + {"NLMSG_NOOP", Const, 0}, + {"NLMSG_OVERRUN", Const, 0}, + {"NLM_F_ACK", Const, 0}, + {"NLM_F_APPEND", Const, 0}, + {"NLM_F_ATOMIC", Const, 0}, + {"NLM_F_CREATE", Const, 0}, + {"NLM_F_DUMP", Const, 0}, + {"NLM_F_ECHO", Const, 0}, + {"NLM_F_EXCL", Const, 0}, + {"NLM_F_MATCH", Const, 0}, + {"NLM_F_MULTI", Const, 0}, + {"NLM_F_REPLACE", Const, 0}, + {"NLM_F_REQUEST", Const, 0}, + {"NLM_F_ROOT", Const, 0}, + {"NOFLSH", Const, 0}, + {"NOTE_ABSOLUTE", Const, 0}, + {"NOTE_ATTRIB", Const, 0}, + {"NOTE_BACKGROUND", Const, 16}, + {"NOTE_CHILD", Const, 0}, + {"NOTE_CRITICAL", Const, 16}, + {"NOTE_DELETE", Const, 0}, + {"NOTE_EOF", Const, 1}, + {"NOTE_EXEC", Const, 0}, + {"NOTE_EXIT", Const, 0}, + {"NOTE_EXITSTATUS", Const, 0}, + 
{"NOTE_EXIT_CSERROR", Const, 16}, + {"NOTE_EXIT_DECRYPTFAIL", Const, 16}, + {"NOTE_EXIT_DETAIL", Const, 16}, + {"NOTE_EXIT_DETAIL_MASK", Const, 16}, + {"NOTE_EXIT_MEMORY", Const, 16}, + {"NOTE_EXIT_REPARENTED", Const, 16}, + {"NOTE_EXTEND", Const, 0}, + {"NOTE_FFAND", Const, 0}, + {"NOTE_FFCOPY", Const, 0}, + {"NOTE_FFCTRLMASK", Const, 0}, + {"NOTE_FFLAGSMASK", Const, 0}, + {"NOTE_FFNOP", Const, 0}, + {"NOTE_FFOR", Const, 0}, + {"NOTE_FORK", Const, 0}, + {"NOTE_LEEWAY", Const, 16}, + {"NOTE_LINK", Const, 0}, + {"NOTE_LOWAT", Const, 0}, + {"NOTE_NONE", Const, 0}, + {"NOTE_NSECONDS", Const, 0}, + {"NOTE_PCTRLMASK", Const, 0}, + {"NOTE_PDATAMASK", Const, 0}, + {"NOTE_REAP", Const, 0}, + {"NOTE_RENAME", Const, 0}, + {"NOTE_RESOURCEEND", Const, 0}, + {"NOTE_REVOKE", Const, 0}, + {"NOTE_SECONDS", Const, 0}, + {"NOTE_SIGNAL", Const, 0}, + {"NOTE_TRACK", Const, 0}, + {"NOTE_TRACKERR", Const, 0}, + {"NOTE_TRIGGER", Const, 0}, + {"NOTE_TRUNCATE", Const, 1}, + {"NOTE_USECONDS", Const, 0}, + {"NOTE_VM_ERROR", Const, 0}, + {"NOTE_VM_PRESSURE", Const, 0}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", Const, 0}, + {"NOTE_VM_PRESSURE_TERMINATE", Const, 0}, + {"NOTE_WRITE", Const, 0}, + {"NameCanonical", Const, 0}, + {"NameCanonicalEx", Const, 0}, + {"NameDisplay", Const, 0}, + {"NameDnsDomain", Const, 0}, + {"NameFullyQualifiedDN", Const, 0}, + {"NameSamCompatible", Const, 0}, + {"NameServicePrincipal", Const, 0}, + {"NameUniqueId", Const, 0}, + {"NameUnknown", Const, 0}, + {"NameUserPrincipal", Const, 0}, + {"Nanosleep", Func, 0}, + {"NetApiBufferFree", Func, 0}, + {"NetGetJoinInformation", Func, 2}, + {"NetSetupDomainName", Const, 2}, + {"NetSetupUnjoined", Const, 2}, + {"NetSetupUnknownStatus", Const, 2}, + {"NetSetupWorkgroupName", Const, 2}, + {"NetUserGetInfo", Func, 0}, + {"NetlinkMessage", Type, 0}, + {"NetlinkMessage.Data", Field, 0}, + {"NetlinkMessage.Header", Field, 0}, + {"NetlinkRIB", Func, 0}, + {"NetlinkRouteAttr", Type, 0}, + {"NetlinkRouteAttr.Attr", Field, 0}, + {"NetlinkRouteAttr.Value", Field, 0}, + {"NetlinkRouteRequest", Type, 0}, + {"NetlinkRouteRequest.Data", Field, 0}, + {"NetlinkRouteRequest.Header", Field, 0}, + {"NewCallback", Func, 0}, + {"NewCallbackCDecl", Func, 3}, + {"NewLazyDLL", Func, 0}, + {"NlAttr", Type, 0}, + {"NlAttr.Len", Field, 0}, + {"NlAttr.Type", Field, 0}, + {"NlMsgerr", Type, 0}, + {"NlMsgerr.Error", Field, 0}, + {"NlMsgerr.Msg", Field, 0}, + {"NlMsghdr", Type, 0}, + {"NlMsghdr.Flags", Field, 0}, + {"NlMsghdr.Len", Field, 0}, + {"NlMsghdr.Pid", Field, 0}, + {"NlMsghdr.Seq", Field, 0}, + {"NlMsghdr.Type", Field, 0}, + {"NsecToFiletime", Func, 0}, + {"NsecToTimespec", Func, 0}, + {"NsecToTimeval", Func, 0}, + {"Ntohs", Func, 0}, + {"OCRNL", Const, 0}, + {"OFDEL", Const, 0}, + {"OFILL", Const, 0}, + {"OFIOGETBMAP", Const, 1}, + {"OID_PKIX_KP_SERVER_AUTH", Var, 0}, + {"OID_SERVER_GATED_CRYPTO", Var, 0}, + {"OID_SGC_NETSCAPE", Var, 0}, + {"OLCUC", Const, 0}, + {"ONLCR", Const, 0}, + {"ONLRET", Const, 0}, + {"ONOCR", Const, 0}, + {"ONOEOT", Const, 1}, + {"OPEN_ALWAYS", Const, 0}, + {"OPEN_EXISTING", Const, 0}, + {"OPOST", Const, 0}, + {"O_ACCMODE", Const, 0}, + {"O_ALERT", Const, 0}, + {"O_ALT_IO", Const, 1}, + {"O_APPEND", Const, 0}, + {"O_ASYNC", Const, 0}, + {"O_CLOEXEC", Const, 0}, + {"O_CREAT", Const, 0}, + {"O_DIRECT", Const, 0}, + {"O_DIRECTORY", Const, 0}, + {"O_DP_GETRAWENCRYPTED", Const, 16}, + {"O_DSYNC", Const, 0}, + {"O_EVTONLY", Const, 0}, + {"O_EXCL", Const, 0}, + {"O_EXEC", Const, 0}, + {"O_EXLOCK", Const, 0}, + {"O_FSYNC", Const, 0}, + {"O_LARGEFILE", 
Const, 0}, + {"O_NDELAY", Const, 0}, + {"O_NOATIME", Const, 0}, + {"O_NOCTTY", Const, 0}, + {"O_NOFOLLOW", Const, 0}, + {"O_NONBLOCK", Const, 0}, + {"O_NOSIGPIPE", Const, 1}, + {"O_POPUP", Const, 0}, + {"O_RDONLY", Const, 0}, + {"O_RDWR", Const, 0}, + {"O_RSYNC", Const, 0}, + {"O_SHLOCK", Const, 0}, + {"O_SYMLINK", Const, 0}, + {"O_SYNC", Const, 0}, + {"O_TRUNC", Const, 0}, + {"O_TTY_INIT", Const, 0}, + {"O_WRONLY", Const, 0}, + {"Open", Func, 0}, + {"OpenCurrentProcessToken", Func, 0}, + {"OpenProcess", Func, 0}, + {"OpenProcessToken", Func, 0}, + {"Openat", Func, 0}, + {"Overlapped", Type, 0}, + {"Overlapped.HEvent", Field, 0}, + {"Overlapped.Internal", Field, 0}, + {"Overlapped.InternalHigh", Field, 0}, + {"Overlapped.Offset", Field, 0}, + {"Overlapped.OffsetHigh", Field, 0}, + {"PACKET_ADD_MEMBERSHIP", Const, 0}, + {"PACKET_BROADCAST", Const, 0}, + {"PACKET_DROP_MEMBERSHIP", Const, 0}, + {"PACKET_FASTROUTE", Const, 0}, + {"PACKET_HOST", Const, 0}, + {"PACKET_LOOPBACK", Const, 0}, + {"PACKET_MR_ALLMULTI", Const, 0}, + {"PACKET_MR_MULTICAST", Const, 0}, + {"PACKET_MR_PROMISC", Const, 0}, + {"PACKET_MULTICAST", Const, 0}, + {"PACKET_OTHERHOST", Const, 0}, + {"PACKET_OUTGOING", Const, 0}, + {"PACKET_RECV_OUTPUT", Const, 0}, + {"PACKET_RX_RING", Const, 0}, + {"PACKET_STATISTICS", Const, 0}, + {"PAGE_EXECUTE_READ", Const, 0}, + {"PAGE_EXECUTE_READWRITE", Const, 0}, + {"PAGE_EXECUTE_WRITECOPY", Const, 0}, + {"PAGE_READONLY", Const, 0}, + {"PAGE_READWRITE", Const, 0}, + {"PAGE_WRITECOPY", Const, 0}, + {"PARENB", Const, 0}, + {"PARMRK", Const, 0}, + {"PARODD", Const, 0}, + {"PENDIN", Const, 0}, + {"PFL_HIDDEN", Const, 2}, + {"PFL_MATCHES_PROTOCOL_ZERO", Const, 2}, + {"PFL_MULTIPLE_PROTO_ENTRIES", Const, 2}, + {"PFL_NETWORKDIRECT_PROVIDER", Const, 2}, + {"PFL_RECOMMENDED_PROTO_ENTRY", Const, 2}, + {"PF_FLUSH", Const, 1}, + {"PKCS_7_ASN_ENCODING", Const, 0}, + {"PMC5_PIPELINE_FLUSH", Const, 1}, + {"PRIO_PGRP", Const, 2}, + {"PRIO_PROCESS", Const, 2}, + {"PRIO_USER", Const, 2}, + {"PRI_IOFLUSH", Const, 1}, + {"PROCESS_QUERY_INFORMATION", Const, 0}, + {"PROCESS_TERMINATE", Const, 2}, + {"PROT_EXEC", Const, 0}, + {"PROT_GROWSDOWN", Const, 0}, + {"PROT_GROWSUP", Const, 0}, + {"PROT_NONE", Const, 0}, + {"PROT_READ", Const, 0}, + {"PROT_WRITE", Const, 0}, + {"PROV_DH_SCHANNEL", Const, 0}, + {"PROV_DSS", Const, 0}, + {"PROV_DSS_DH", Const, 0}, + {"PROV_EC_ECDSA_FULL", Const, 0}, + {"PROV_EC_ECDSA_SIG", Const, 0}, + {"PROV_EC_ECNRA_FULL", Const, 0}, + {"PROV_EC_ECNRA_SIG", Const, 0}, + {"PROV_FORTEZZA", Const, 0}, + {"PROV_INTEL_SEC", Const, 0}, + {"PROV_MS_EXCHANGE", Const, 0}, + {"PROV_REPLACE_OWF", Const, 0}, + {"PROV_RNG", Const, 0}, + {"PROV_RSA_AES", Const, 0}, + {"PROV_RSA_FULL", Const, 0}, + {"PROV_RSA_SCHANNEL", Const, 0}, + {"PROV_RSA_SIG", Const, 0}, + {"PROV_SPYRUS_LYNKS", Const, 0}, + {"PROV_SSL", Const, 0}, + {"PR_CAPBSET_DROP", Const, 0}, + {"PR_CAPBSET_READ", Const, 0}, + {"PR_CLEAR_SECCOMP_FILTER", Const, 0}, + {"PR_ENDIAN_BIG", Const, 0}, + {"PR_ENDIAN_LITTLE", Const, 0}, + {"PR_ENDIAN_PPC_LITTLE", Const, 0}, + {"PR_FPEMU_NOPRINT", Const, 0}, + {"PR_FPEMU_SIGFPE", Const, 0}, + {"PR_FP_EXC_ASYNC", Const, 0}, + {"PR_FP_EXC_DISABLED", Const, 0}, + {"PR_FP_EXC_DIV", Const, 0}, + {"PR_FP_EXC_INV", Const, 0}, + {"PR_FP_EXC_NONRECOV", Const, 0}, + {"PR_FP_EXC_OVF", Const, 0}, + {"PR_FP_EXC_PRECISE", Const, 0}, + {"PR_FP_EXC_RES", Const, 0}, + {"PR_FP_EXC_SW_ENABLE", Const, 0}, + {"PR_FP_EXC_UND", Const, 0}, + {"PR_GET_DUMPABLE", Const, 0}, + {"PR_GET_ENDIAN", Const, 0}, + {"PR_GET_FPEMU", 
Const, 0}, + {"PR_GET_FPEXC", Const, 0}, + {"PR_GET_KEEPCAPS", Const, 0}, + {"PR_GET_NAME", Const, 0}, + {"PR_GET_PDEATHSIG", Const, 0}, + {"PR_GET_SECCOMP", Const, 0}, + {"PR_GET_SECCOMP_FILTER", Const, 0}, + {"PR_GET_SECUREBITS", Const, 0}, + {"PR_GET_TIMERSLACK", Const, 0}, + {"PR_GET_TIMING", Const, 0}, + {"PR_GET_TSC", Const, 0}, + {"PR_GET_UNALIGN", Const, 0}, + {"PR_MCE_KILL", Const, 0}, + {"PR_MCE_KILL_CLEAR", Const, 0}, + {"PR_MCE_KILL_DEFAULT", Const, 0}, + {"PR_MCE_KILL_EARLY", Const, 0}, + {"PR_MCE_KILL_GET", Const, 0}, + {"PR_MCE_KILL_LATE", Const, 0}, + {"PR_MCE_KILL_SET", Const, 0}, + {"PR_SECCOMP_FILTER_EVENT", Const, 0}, + {"PR_SECCOMP_FILTER_SYSCALL", Const, 0}, + {"PR_SET_DUMPABLE", Const, 0}, + {"PR_SET_ENDIAN", Const, 0}, + {"PR_SET_FPEMU", Const, 0}, + {"PR_SET_FPEXC", Const, 0}, + {"PR_SET_KEEPCAPS", Const, 0}, + {"PR_SET_NAME", Const, 0}, + {"PR_SET_PDEATHSIG", Const, 0}, + {"PR_SET_PTRACER", Const, 0}, + {"PR_SET_SECCOMP", Const, 0}, + {"PR_SET_SECCOMP_FILTER", Const, 0}, + {"PR_SET_SECUREBITS", Const, 0}, + {"PR_SET_TIMERSLACK", Const, 0}, + {"PR_SET_TIMING", Const, 0}, + {"PR_SET_TSC", Const, 0}, + {"PR_SET_UNALIGN", Const, 0}, + {"PR_TASK_PERF_EVENTS_DISABLE", Const, 0}, + {"PR_TASK_PERF_EVENTS_ENABLE", Const, 0}, + {"PR_TIMING_STATISTICAL", Const, 0}, + {"PR_TIMING_TIMESTAMP", Const, 0}, + {"PR_TSC_ENABLE", Const, 0}, + {"PR_TSC_SIGSEGV", Const, 0}, + {"PR_UNALIGN_NOPRINT", Const, 0}, + {"PR_UNALIGN_SIGBUS", Const, 0}, + {"PTRACE_ARCH_PRCTL", Const, 0}, + {"PTRACE_ATTACH", Const, 0}, + {"PTRACE_CONT", Const, 0}, + {"PTRACE_DETACH", Const, 0}, + {"PTRACE_EVENT_CLONE", Const, 0}, + {"PTRACE_EVENT_EXEC", Const, 0}, + {"PTRACE_EVENT_EXIT", Const, 0}, + {"PTRACE_EVENT_FORK", Const, 0}, + {"PTRACE_EVENT_VFORK", Const, 0}, + {"PTRACE_EVENT_VFORK_DONE", Const, 0}, + {"PTRACE_GETCRUNCHREGS", Const, 0}, + {"PTRACE_GETEVENTMSG", Const, 0}, + {"PTRACE_GETFPREGS", Const, 0}, + {"PTRACE_GETFPXREGS", Const, 0}, + {"PTRACE_GETHBPREGS", Const, 0}, + {"PTRACE_GETREGS", Const, 0}, + {"PTRACE_GETREGSET", Const, 0}, + {"PTRACE_GETSIGINFO", Const, 0}, + {"PTRACE_GETVFPREGS", Const, 0}, + {"PTRACE_GETWMMXREGS", Const, 0}, + {"PTRACE_GET_THREAD_AREA", Const, 0}, + {"PTRACE_KILL", Const, 0}, + {"PTRACE_OLDSETOPTIONS", Const, 0}, + {"PTRACE_O_MASK", Const, 0}, + {"PTRACE_O_TRACECLONE", Const, 0}, + {"PTRACE_O_TRACEEXEC", Const, 0}, + {"PTRACE_O_TRACEEXIT", Const, 0}, + {"PTRACE_O_TRACEFORK", Const, 0}, + {"PTRACE_O_TRACESYSGOOD", Const, 0}, + {"PTRACE_O_TRACEVFORK", Const, 0}, + {"PTRACE_O_TRACEVFORKDONE", Const, 0}, + {"PTRACE_PEEKDATA", Const, 0}, + {"PTRACE_PEEKTEXT", Const, 0}, + {"PTRACE_PEEKUSR", Const, 0}, + {"PTRACE_POKEDATA", Const, 0}, + {"PTRACE_POKETEXT", Const, 0}, + {"PTRACE_POKEUSR", Const, 0}, + {"PTRACE_SETCRUNCHREGS", Const, 0}, + {"PTRACE_SETFPREGS", Const, 0}, + {"PTRACE_SETFPXREGS", Const, 0}, + {"PTRACE_SETHBPREGS", Const, 0}, + {"PTRACE_SETOPTIONS", Const, 0}, + {"PTRACE_SETREGS", Const, 0}, + {"PTRACE_SETREGSET", Const, 0}, + {"PTRACE_SETSIGINFO", Const, 0}, + {"PTRACE_SETVFPREGS", Const, 0}, + {"PTRACE_SETWMMXREGS", Const, 0}, + {"PTRACE_SET_SYSCALL", Const, 0}, + {"PTRACE_SET_THREAD_AREA", Const, 0}, + {"PTRACE_SINGLEBLOCK", Const, 0}, + {"PTRACE_SINGLESTEP", Const, 0}, + {"PTRACE_SYSCALL", Const, 0}, + {"PTRACE_SYSEMU", Const, 0}, + {"PTRACE_SYSEMU_SINGLESTEP", Const, 0}, + {"PTRACE_TRACEME", Const, 0}, + {"PT_ATTACH", Const, 0}, + {"PT_ATTACHEXC", Const, 0}, + {"PT_CONTINUE", Const, 0}, + {"PT_DATA_ADDR", Const, 0}, + {"PT_DENY_ATTACH", Const, 0}, + 
{"PT_DETACH", Const, 0}, + {"PT_FIRSTMACH", Const, 0}, + {"PT_FORCEQUOTA", Const, 0}, + {"PT_KILL", Const, 0}, + {"PT_MASK", Const, 1}, + {"PT_READ_D", Const, 0}, + {"PT_READ_I", Const, 0}, + {"PT_READ_U", Const, 0}, + {"PT_SIGEXC", Const, 0}, + {"PT_STEP", Const, 0}, + {"PT_TEXT_ADDR", Const, 0}, + {"PT_TEXT_END_ADDR", Const, 0}, + {"PT_THUPDATE", Const, 0}, + {"PT_TRACE_ME", Const, 0}, + {"PT_WRITE_D", Const, 0}, + {"PT_WRITE_I", Const, 0}, + {"PT_WRITE_U", Const, 0}, + {"ParseDirent", Func, 0}, + {"ParseNetlinkMessage", Func, 0}, + {"ParseNetlinkRouteAttr", Func, 0}, + {"ParseRoutingMessage", Func, 0}, + {"ParseRoutingSockaddr", Func, 0}, + {"ParseSocketControlMessage", Func, 0}, + {"ParseUnixCredentials", Func, 0}, + {"ParseUnixRights", Func, 0}, + {"PathMax", Const, 0}, + {"Pathconf", Func, 0}, + {"Pause", Func, 0}, + {"Pipe", Func, 0}, + {"Pipe2", Func, 1}, + {"PivotRoot", Func, 0}, + {"Pointer", Type, 11}, + {"PostQueuedCompletionStatus", Func, 0}, + {"Pread", Func, 0}, + {"Proc", Type, 0}, + {"Proc.Dll", Field, 0}, + {"Proc.Name", Field, 0}, + {"ProcAttr", Type, 0}, + {"ProcAttr.Dir", Field, 0}, + {"ProcAttr.Env", Field, 0}, + {"ProcAttr.Files", Field, 0}, + {"ProcAttr.Sys", Field, 0}, + {"Process32First", Func, 4}, + {"Process32Next", Func, 4}, + {"ProcessEntry32", Type, 4}, + {"ProcessEntry32.DefaultHeapID", Field, 4}, + {"ProcessEntry32.ExeFile", Field, 4}, + {"ProcessEntry32.Flags", Field, 4}, + {"ProcessEntry32.ModuleID", Field, 4}, + {"ProcessEntry32.ParentProcessID", Field, 4}, + {"ProcessEntry32.PriClassBase", Field, 4}, + {"ProcessEntry32.ProcessID", Field, 4}, + {"ProcessEntry32.Size", Field, 4}, + {"ProcessEntry32.Threads", Field, 4}, + {"ProcessEntry32.Usage", Field, 4}, + {"ProcessInformation", Type, 0}, + {"ProcessInformation.Process", Field, 0}, + {"ProcessInformation.ProcessId", Field, 0}, + {"ProcessInformation.Thread", Field, 0}, + {"ProcessInformation.ThreadId", Field, 0}, + {"Protoent", Type, 0}, + {"Protoent.Aliases", Field, 0}, + {"Protoent.Name", Field, 0}, + {"Protoent.Proto", Field, 0}, + {"PtraceAttach", Func, 0}, + {"PtraceCont", Func, 0}, + {"PtraceDetach", Func, 0}, + {"PtraceGetEventMsg", Func, 0}, + {"PtraceGetRegs", Func, 0}, + {"PtracePeekData", Func, 0}, + {"PtracePeekText", Func, 0}, + {"PtracePokeData", Func, 0}, + {"PtracePokeText", Func, 0}, + {"PtraceRegs", Type, 0}, + {"PtraceRegs.Cs", Field, 0}, + {"PtraceRegs.Ds", Field, 0}, + {"PtraceRegs.Eax", Field, 0}, + {"PtraceRegs.Ebp", Field, 0}, + {"PtraceRegs.Ebx", Field, 0}, + {"PtraceRegs.Ecx", Field, 0}, + {"PtraceRegs.Edi", Field, 0}, + {"PtraceRegs.Edx", Field, 0}, + {"PtraceRegs.Eflags", Field, 0}, + {"PtraceRegs.Eip", Field, 0}, + {"PtraceRegs.Es", Field, 0}, + {"PtraceRegs.Esi", Field, 0}, + {"PtraceRegs.Esp", Field, 0}, + {"PtraceRegs.Fs", Field, 0}, + {"PtraceRegs.Fs_base", Field, 0}, + {"PtraceRegs.Gs", Field, 0}, + {"PtraceRegs.Gs_base", Field, 0}, + {"PtraceRegs.Orig_eax", Field, 0}, + {"PtraceRegs.Orig_rax", Field, 0}, + {"PtraceRegs.R10", Field, 0}, + {"PtraceRegs.R11", Field, 0}, + {"PtraceRegs.R12", Field, 0}, + {"PtraceRegs.R13", Field, 0}, + {"PtraceRegs.R14", Field, 0}, + {"PtraceRegs.R15", Field, 0}, + {"PtraceRegs.R8", Field, 0}, + {"PtraceRegs.R9", Field, 0}, + {"PtraceRegs.Rax", Field, 0}, + {"PtraceRegs.Rbp", Field, 0}, + {"PtraceRegs.Rbx", Field, 0}, + {"PtraceRegs.Rcx", Field, 0}, + {"PtraceRegs.Rdi", Field, 0}, + {"PtraceRegs.Rdx", Field, 0}, + {"PtraceRegs.Rip", Field, 0}, + {"PtraceRegs.Rsi", Field, 0}, + {"PtraceRegs.Rsp", Field, 0}, + {"PtraceRegs.Ss", Field, 0}, + 
{"PtraceRegs.Uregs", Field, 0}, + {"PtraceRegs.Xcs", Field, 0}, + {"PtraceRegs.Xds", Field, 0}, + {"PtraceRegs.Xes", Field, 0}, + {"PtraceRegs.Xfs", Field, 0}, + {"PtraceRegs.Xgs", Field, 0}, + {"PtraceRegs.Xss", Field, 0}, + {"PtraceSetOptions", Func, 0}, + {"PtraceSetRegs", Func, 0}, + {"PtraceSingleStep", Func, 0}, + {"PtraceSyscall", Func, 1}, + {"Pwrite", Func, 0}, + {"REG_BINARY", Const, 0}, + {"REG_DWORD", Const, 0}, + {"REG_DWORD_BIG_ENDIAN", Const, 0}, + {"REG_DWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_EXPAND_SZ", Const, 0}, + {"REG_FULL_RESOURCE_DESCRIPTOR", Const, 0}, + {"REG_LINK", Const, 0}, + {"REG_MULTI_SZ", Const, 0}, + {"REG_NONE", Const, 0}, + {"REG_QWORD", Const, 0}, + {"REG_QWORD_LITTLE_ENDIAN", Const, 0}, + {"REG_RESOURCE_LIST", Const, 0}, + {"REG_RESOURCE_REQUIREMENTS_LIST", Const, 0}, + {"REG_SZ", Const, 0}, + {"RLIMIT_AS", Const, 0}, + {"RLIMIT_CORE", Const, 0}, + {"RLIMIT_CPU", Const, 0}, + {"RLIMIT_CPU_USAGE_MONITOR", Const, 16}, + {"RLIMIT_DATA", Const, 0}, + {"RLIMIT_FSIZE", Const, 0}, + {"RLIMIT_NOFILE", Const, 0}, + {"RLIMIT_STACK", Const, 0}, + {"RLIM_INFINITY", Const, 0}, + {"RTAX_ADVMSS", Const, 0}, + {"RTAX_AUTHOR", Const, 0}, + {"RTAX_BRD", Const, 0}, + {"RTAX_CWND", Const, 0}, + {"RTAX_DST", Const, 0}, + {"RTAX_FEATURES", Const, 0}, + {"RTAX_FEATURE_ALLFRAG", Const, 0}, + {"RTAX_FEATURE_ECN", Const, 0}, + {"RTAX_FEATURE_SACK", Const, 0}, + {"RTAX_FEATURE_TIMESTAMP", Const, 0}, + {"RTAX_GATEWAY", Const, 0}, + {"RTAX_GENMASK", Const, 0}, + {"RTAX_HOPLIMIT", Const, 0}, + {"RTAX_IFA", Const, 0}, + {"RTAX_IFP", Const, 0}, + {"RTAX_INITCWND", Const, 0}, + {"RTAX_INITRWND", Const, 0}, + {"RTAX_LABEL", Const, 1}, + {"RTAX_LOCK", Const, 0}, + {"RTAX_MAX", Const, 0}, + {"RTAX_MTU", Const, 0}, + {"RTAX_NETMASK", Const, 0}, + {"RTAX_REORDERING", Const, 0}, + {"RTAX_RTO_MIN", Const, 0}, + {"RTAX_RTT", Const, 0}, + {"RTAX_RTTVAR", Const, 0}, + {"RTAX_SRC", Const, 1}, + {"RTAX_SRCMASK", Const, 1}, + {"RTAX_SSTHRESH", Const, 0}, + {"RTAX_TAG", Const, 1}, + {"RTAX_UNSPEC", Const, 0}, + {"RTAX_WINDOW", Const, 0}, + {"RTA_ALIGNTO", Const, 0}, + {"RTA_AUTHOR", Const, 0}, + {"RTA_BRD", Const, 0}, + {"RTA_CACHEINFO", Const, 0}, + {"RTA_DST", Const, 0}, + {"RTA_FLOW", Const, 0}, + {"RTA_GATEWAY", Const, 0}, + {"RTA_GENMASK", Const, 0}, + {"RTA_IFA", Const, 0}, + {"RTA_IFP", Const, 0}, + {"RTA_IIF", Const, 0}, + {"RTA_LABEL", Const, 1}, + {"RTA_MAX", Const, 0}, + {"RTA_METRICS", Const, 0}, + {"RTA_MULTIPATH", Const, 0}, + {"RTA_NETMASK", Const, 0}, + {"RTA_OIF", Const, 0}, + {"RTA_PREFSRC", Const, 0}, + {"RTA_PRIORITY", Const, 0}, + {"RTA_SRC", Const, 0}, + {"RTA_SRCMASK", Const, 1}, + {"RTA_TABLE", Const, 0}, + {"RTA_TAG", Const, 1}, + {"RTA_UNSPEC", Const, 0}, + {"RTCF_DIRECTSRC", Const, 0}, + {"RTCF_DOREDIRECT", Const, 0}, + {"RTCF_LOG", Const, 0}, + {"RTCF_MASQ", Const, 0}, + {"RTCF_NAT", Const, 0}, + {"RTCF_VALVE", Const, 0}, + {"RTF_ADDRCLASSMASK", Const, 0}, + {"RTF_ADDRCONF", Const, 0}, + {"RTF_ALLONLINK", Const, 0}, + {"RTF_ANNOUNCE", Const, 1}, + {"RTF_BLACKHOLE", Const, 0}, + {"RTF_BROADCAST", Const, 0}, + {"RTF_CACHE", Const, 0}, + {"RTF_CLONED", Const, 1}, + {"RTF_CLONING", Const, 0}, + {"RTF_CONDEMNED", Const, 0}, + {"RTF_DEFAULT", Const, 0}, + {"RTF_DELCLONE", Const, 0}, + {"RTF_DONE", Const, 0}, + {"RTF_DYNAMIC", Const, 0}, + {"RTF_FLOW", Const, 0}, + {"RTF_FMASK", Const, 0}, + {"RTF_GATEWAY", Const, 0}, + {"RTF_GWFLAG_COMPAT", Const, 3}, + {"RTF_HOST", Const, 0}, + {"RTF_IFREF", Const, 0}, + {"RTF_IFSCOPE", Const, 0}, + {"RTF_INTERFACE", Const, 0}, + {"RTF_IRTT", 
Const, 0}, + {"RTF_LINKRT", Const, 0}, + {"RTF_LLDATA", Const, 0}, + {"RTF_LLINFO", Const, 0}, + {"RTF_LOCAL", Const, 0}, + {"RTF_MASK", Const, 1}, + {"RTF_MODIFIED", Const, 0}, + {"RTF_MPATH", Const, 1}, + {"RTF_MPLS", Const, 1}, + {"RTF_MSS", Const, 0}, + {"RTF_MTU", Const, 0}, + {"RTF_MULTICAST", Const, 0}, + {"RTF_NAT", Const, 0}, + {"RTF_NOFORWARD", Const, 0}, + {"RTF_NONEXTHOP", Const, 0}, + {"RTF_NOPMTUDISC", Const, 0}, + {"RTF_PERMANENT_ARP", Const, 1}, + {"RTF_PINNED", Const, 0}, + {"RTF_POLICY", Const, 0}, + {"RTF_PRCLONING", Const, 0}, + {"RTF_PROTO1", Const, 0}, + {"RTF_PROTO2", Const, 0}, + {"RTF_PROTO3", Const, 0}, + {"RTF_PROXY", Const, 16}, + {"RTF_REINSTATE", Const, 0}, + {"RTF_REJECT", Const, 0}, + {"RTF_RNH_LOCKED", Const, 0}, + {"RTF_ROUTER", Const, 16}, + {"RTF_SOURCE", Const, 1}, + {"RTF_SRC", Const, 1}, + {"RTF_STATIC", Const, 0}, + {"RTF_STICKY", Const, 0}, + {"RTF_THROW", Const, 0}, + {"RTF_TUNNEL", Const, 1}, + {"RTF_UP", Const, 0}, + {"RTF_USETRAILERS", Const, 1}, + {"RTF_WASCLONED", Const, 0}, + {"RTF_WINDOW", Const, 0}, + {"RTF_XRESOLVE", Const, 0}, + {"RTM_ADD", Const, 0}, + {"RTM_BASE", Const, 0}, + {"RTM_CHANGE", Const, 0}, + {"RTM_CHGADDR", Const, 1}, + {"RTM_DELACTION", Const, 0}, + {"RTM_DELADDR", Const, 0}, + {"RTM_DELADDRLABEL", Const, 0}, + {"RTM_DELETE", Const, 0}, + {"RTM_DELLINK", Const, 0}, + {"RTM_DELMADDR", Const, 0}, + {"RTM_DELNEIGH", Const, 0}, + {"RTM_DELQDISC", Const, 0}, + {"RTM_DELROUTE", Const, 0}, + {"RTM_DELRULE", Const, 0}, + {"RTM_DELTCLASS", Const, 0}, + {"RTM_DELTFILTER", Const, 0}, + {"RTM_DESYNC", Const, 1}, + {"RTM_F_CLONED", Const, 0}, + {"RTM_F_EQUALIZE", Const, 0}, + {"RTM_F_NOTIFY", Const, 0}, + {"RTM_F_PREFIX", Const, 0}, + {"RTM_GET", Const, 0}, + {"RTM_GET2", Const, 0}, + {"RTM_GETACTION", Const, 0}, + {"RTM_GETADDR", Const, 0}, + {"RTM_GETADDRLABEL", Const, 0}, + {"RTM_GETANYCAST", Const, 0}, + {"RTM_GETDCB", Const, 0}, + {"RTM_GETLINK", Const, 0}, + {"RTM_GETMULTICAST", Const, 0}, + {"RTM_GETNEIGH", Const, 0}, + {"RTM_GETNEIGHTBL", Const, 0}, + {"RTM_GETQDISC", Const, 0}, + {"RTM_GETROUTE", Const, 0}, + {"RTM_GETRULE", Const, 0}, + {"RTM_GETTCLASS", Const, 0}, + {"RTM_GETTFILTER", Const, 0}, + {"RTM_IEEE80211", Const, 0}, + {"RTM_IFANNOUNCE", Const, 0}, + {"RTM_IFINFO", Const, 0}, + {"RTM_IFINFO2", Const, 0}, + {"RTM_LLINFO_UPD", Const, 1}, + {"RTM_LOCK", Const, 0}, + {"RTM_LOSING", Const, 0}, + {"RTM_MAX", Const, 0}, + {"RTM_MAXSIZE", Const, 1}, + {"RTM_MISS", Const, 0}, + {"RTM_NEWACTION", Const, 0}, + {"RTM_NEWADDR", Const, 0}, + {"RTM_NEWADDRLABEL", Const, 0}, + {"RTM_NEWLINK", Const, 0}, + {"RTM_NEWMADDR", Const, 0}, + {"RTM_NEWMADDR2", Const, 0}, + {"RTM_NEWNDUSEROPT", Const, 0}, + {"RTM_NEWNEIGH", Const, 0}, + {"RTM_NEWNEIGHTBL", Const, 0}, + {"RTM_NEWPREFIX", Const, 0}, + {"RTM_NEWQDISC", Const, 0}, + {"RTM_NEWROUTE", Const, 0}, + {"RTM_NEWRULE", Const, 0}, + {"RTM_NEWTCLASS", Const, 0}, + {"RTM_NEWTFILTER", Const, 0}, + {"RTM_NR_FAMILIES", Const, 0}, + {"RTM_NR_MSGTYPES", Const, 0}, + {"RTM_OIFINFO", Const, 1}, + {"RTM_OLDADD", Const, 0}, + {"RTM_OLDDEL", Const, 0}, + {"RTM_OOIFINFO", Const, 1}, + {"RTM_REDIRECT", Const, 0}, + {"RTM_RESOLVE", Const, 0}, + {"RTM_RTTUNIT", Const, 0}, + {"RTM_SETDCB", Const, 0}, + {"RTM_SETGATE", Const, 1}, + {"RTM_SETLINK", Const, 0}, + {"RTM_SETNEIGHTBL", Const, 0}, + {"RTM_VERSION", Const, 0}, + {"RTNH_ALIGNTO", Const, 0}, + {"RTNH_F_DEAD", Const, 0}, + {"RTNH_F_ONLINK", Const, 0}, + {"RTNH_F_PERVASIVE", Const, 0}, + {"RTNLGRP_IPV4_IFADDR", Const, 1}, + {"RTNLGRP_IPV4_MROUTE", 
Const, 1}, + {"RTNLGRP_IPV4_ROUTE", Const, 1}, + {"RTNLGRP_IPV4_RULE", Const, 1}, + {"RTNLGRP_IPV6_IFADDR", Const, 1}, + {"RTNLGRP_IPV6_IFINFO", Const, 1}, + {"RTNLGRP_IPV6_MROUTE", Const, 1}, + {"RTNLGRP_IPV6_PREFIX", Const, 1}, + {"RTNLGRP_IPV6_ROUTE", Const, 1}, + {"RTNLGRP_IPV6_RULE", Const, 1}, + {"RTNLGRP_LINK", Const, 1}, + {"RTNLGRP_ND_USEROPT", Const, 1}, + {"RTNLGRP_NEIGH", Const, 1}, + {"RTNLGRP_NONE", Const, 1}, + {"RTNLGRP_NOTIFY", Const, 1}, + {"RTNLGRP_TC", Const, 1}, + {"RTN_ANYCAST", Const, 0}, + {"RTN_BLACKHOLE", Const, 0}, + {"RTN_BROADCAST", Const, 0}, + {"RTN_LOCAL", Const, 0}, + {"RTN_MAX", Const, 0}, + {"RTN_MULTICAST", Const, 0}, + {"RTN_NAT", Const, 0}, + {"RTN_PROHIBIT", Const, 0}, + {"RTN_THROW", Const, 0}, + {"RTN_UNICAST", Const, 0}, + {"RTN_UNREACHABLE", Const, 0}, + {"RTN_UNSPEC", Const, 0}, + {"RTN_XRESOLVE", Const, 0}, + {"RTPROT_BIRD", Const, 0}, + {"RTPROT_BOOT", Const, 0}, + {"RTPROT_DHCP", Const, 0}, + {"RTPROT_DNROUTED", Const, 0}, + {"RTPROT_GATED", Const, 0}, + {"RTPROT_KERNEL", Const, 0}, + {"RTPROT_MRT", Const, 0}, + {"RTPROT_NTK", Const, 0}, + {"RTPROT_RA", Const, 0}, + {"RTPROT_REDIRECT", Const, 0}, + {"RTPROT_STATIC", Const, 0}, + {"RTPROT_UNSPEC", Const, 0}, + {"RTPROT_XORP", Const, 0}, + {"RTPROT_ZEBRA", Const, 0}, + {"RTV_EXPIRE", Const, 0}, + {"RTV_HOPCOUNT", Const, 0}, + {"RTV_MTU", Const, 0}, + {"RTV_RPIPE", Const, 0}, + {"RTV_RTT", Const, 0}, + {"RTV_RTTVAR", Const, 0}, + {"RTV_SPIPE", Const, 0}, + {"RTV_SSTHRESH", Const, 0}, + {"RTV_WEIGHT", Const, 0}, + {"RT_CACHING_CONTEXT", Const, 1}, + {"RT_CLASS_DEFAULT", Const, 0}, + {"RT_CLASS_LOCAL", Const, 0}, + {"RT_CLASS_MAIN", Const, 0}, + {"RT_CLASS_MAX", Const, 0}, + {"RT_CLASS_UNSPEC", Const, 0}, + {"RT_DEFAULT_FIB", Const, 1}, + {"RT_NORTREF", Const, 1}, + {"RT_SCOPE_HOST", Const, 0}, + {"RT_SCOPE_LINK", Const, 0}, + {"RT_SCOPE_NOWHERE", Const, 0}, + {"RT_SCOPE_SITE", Const, 0}, + {"RT_SCOPE_UNIVERSE", Const, 0}, + {"RT_TABLEID_MAX", Const, 1}, + {"RT_TABLE_COMPAT", Const, 0}, + {"RT_TABLE_DEFAULT", Const, 0}, + {"RT_TABLE_LOCAL", Const, 0}, + {"RT_TABLE_MAIN", Const, 0}, + {"RT_TABLE_MAX", Const, 0}, + {"RT_TABLE_UNSPEC", Const, 0}, + {"RUSAGE_CHILDREN", Const, 0}, + {"RUSAGE_SELF", Const, 0}, + {"RUSAGE_THREAD", Const, 0}, + {"Radvisory_t", Type, 0}, + {"Radvisory_t.Count", Field, 0}, + {"Radvisory_t.Offset", Field, 0}, + {"Radvisory_t.Pad_cgo_0", Field, 0}, + {"RawConn", Type, 9}, + {"RawSockaddr", Type, 0}, + {"RawSockaddr.Data", Field, 0}, + {"RawSockaddr.Family", Field, 0}, + {"RawSockaddr.Len", Field, 0}, + {"RawSockaddrAny", Type, 0}, + {"RawSockaddrAny.Addr", Field, 0}, + {"RawSockaddrAny.Pad", Field, 0}, + {"RawSockaddrDatalink", Type, 0}, + {"RawSockaddrDatalink.Alen", Field, 0}, + {"RawSockaddrDatalink.Data", Field, 0}, + {"RawSockaddrDatalink.Family", Field, 0}, + {"RawSockaddrDatalink.Index", Field, 0}, + {"RawSockaddrDatalink.Len", Field, 0}, + {"RawSockaddrDatalink.Nlen", Field, 0}, + {"RawSockaddrDatalink.Pad_cgo_0", Field, 2}, + {"RawSockaddrDatalink.Slen", Field, 0}, + {"RawSockaddrDatalink.Type", Field, 0}, + {"RawSockaddrInet4", Type, 0}, + {"RawSockaddrInet4.Addr", Field, 0}, + {"RawSockaddrInet4.Family", Field, 0}, + {"RawSockaddrInet4.Len", Field, 0}, + {"RawSockaddrInet4.Port", Field, 0}, + {"RawSockaddrInet4.Zero", Field, 0}, + {"RawSockaddrInet6", Type, 0}, + {"RawSockaddrInet6.Addr", Field, 0}, + {"RawSockaddrInet6.Family", Field, 0}, + {"RawSockaddrInet6.Flowinfo", Field, 0}, + {"RawSockaddrInet6.Len", Field, 0}, + {"RawSockaddrInet6.Port", Field, 0}, + 
{"RawSockaddrInet6.Scope_id", Field, 0}, + {"RawSockaddrLinklayer", Type, 0}, + {"RawSockaddrLinklayer.Addr", Field, 0}, + {"RawSockaddrLinklayer.Family", Field, 0}, + {"RawSockaddrLinklayer.Halen", Field, 0}, + {"RawSockaddrLinklayer.Hatype", Field, 0}, + {"RawSockaddrLinklayer.Ifindex", Field, 0}, + {"RawSockaddrLinklayer.Pkttype", Field, 0}, + {"RawSockaddrLinklayer.Protocol", Field, 0}, + {"RawSockaddrNetlink", Type, 0}, + {"RawSockaddrNetlink.Family", Field, 0}, + {"RawSockaddrNetlink.Groups", Field, 0}, + {"RawSockaddrNetlink.Pad", Field, 0}, + {"RawSockaddrNetlink.Pid", Field, 0}, + {"RawSockaddrUnix", Type, 0}, + {"RawSockaddrUnix.Family", Field, 0}, + {"RawSockaddrUnix.Len", Field, 0}, + {"RawSockaddrUnix.Pad_cgo_0", Field, 2}, + {"RawSockaddrUnix.Path", Field, 0}, + {"RawSyscall", Func, 0}, + {"RawSyscall6", Func, 0}, + {"Read", Func, 0}, + {"ReadConsole", Func, 1}, + {"ReadDirectoryChanges", Func, 0}, + {"ReadDirent", Func, 0}, + {"ReadFile", Func, 0}, + {"Readlink", Func, 0}, + {"Reboot", Func, 0}, + {"Recvfrom", Func, 0}, + {"Recvmsg", Func, 0}, + {"RegCloseKey", Func, 0}, + {"RegEnumKeyEx", Func, 0}, + {"RegOpenKeyEx", Func, 0}, + {"RegQueryInfoKey", Func, 0}, + {"RegQueryValueEx", Func, 0}, + {"RemoveDirectory", Func, 0}, + {"Removexattr", Func, 1}, + {"Rename", Func, 0}, + {"Renameat", Func, 0}, + {"Revoke", Func, 0}, + {"Rlimit", Type, 0}, + {"Rlimit.Cur", Field, 0}, + {"Rlimit.Max", Field, 0}, + {"Rmdir", Func, 0}, + {"RouteMessage", Type, 0}, + {"RouteMessage.Data", Field, 0}, + {"RouteMessage.Header", Field, 0}, + {"RouteRIB", Func, 0}, + {"RoutingMessage", Type, 0}, + {"RtAttr", Type, 0}, + {"RtAttr.Len", Field, 0}, + {"RtAttr.Type", Field, 0}, + {"RtGenmsg", Type, 0}, + {"RtGenmsg.Family", Field, 0}, + {"RtMetrics", Type, 0}, + {"RtMetrics.Expire", Field, 0}, + {"RtMetrics.Filler", Field, 0}, + {"RtMetrics.Hopcount", Field, 0}, + {"RtMetrics.Locks", Field, 0}, + {"RtMetrics.Mtu", Field, 0}, + {"RtMetrics.Pad", Field, 3}, + {"RtMetrics.Pksent", Field, 0}, + {"RtMetrics.Recvpipe", Field, 0}, + {"RtMetrics.Refcnt", Field, 2}, + {"RtMetrics.Rtt", Field, 0}, + {"RtMetrics.Rttvar", Field, 0}, + {"RtMetrics.Sendpipe", Field, 0}, + {"RtMetrics.Ssthresh", Field, 0}, + {"RtMetrics.Weight", Field, 0}, + {"RtMsg", Type, 0}, + {"RtMsg.Dst_len", Field, 0}, + {"RtMsg.Family", Field, 0}, + {"RtMsg.Flags", Field, 0}, + {"RtMsg.Protocol", Field, 0}, + {"RtMsg.Scope", Field, 0}, + {"RtMsg.Src_len", Field, 0}, + {"RtMsg.Table", Field, 0}, + {"RtMsg.Tos", Field, 0}, + {"RtMsg.Type", Field, 0}, + {"RtMsghdr", Type, 0}, + {"RtMsghdr.Addrs", Field, 0}, + {"RtMsghdr.Errno", Field, 0}, + {"RtMsghdr.Flags", Field, 0}, + {"RtMsghdr.Fmask", Field, 0}, + {"RtMsghdr.Hdrlen", Field, 2}, + {"RtMsghdr.Index", Field, 0}, + {"RtMsghdr.Inits", Field, 0}, + {"RtMsghdr.Mpls", Field, 2}, + {"RtMsghdr.Msglen", Field, 0}, + {"RtMsghdr.Pad_cgo_0", Field, 0}, + {"RtMsghdr.Pad_cgo_1", Field, 2}, + {"RtMsghdr.Pid", Field, 0}, + {"RtMsghdr.Priority", Field, 2}, + {"RtMsghdr.Rmx", Field, 0}, + {"RtMsghdr.Seq", Field, 0}, + {"RtMsghdr.Tableid", Field, 2}, + {"RtMsghdr.Type", Field, 0}, + {"RtMsghdr.Use", Field, 0}, + {"RtMsghdr.Version", Field, 0}, + {"RtNexthop", Type, 0}, + {"RtNexthop.Flags", Field, 0}, + {"RtNexthop.Hops", Field, 0}, + {"RtNexthop.Ifindex", Field, 0}, + {"RtNexthop.Len", Field, 0}, + {"Rusage", Type, 0}, + {"Rusage.CreationTime", Field, 0}, + {"Rusage.ExitTime", Field, 0}, + {"Rusage.Idrss", Field, 0}, + {"Rusage.Inblock", Field, 0}, + {"Rusage.Isrss", Field, 0}, + {"Rusage.Ixrss", Field, 0}, + 
{"Rusage.KernelTime", Field, 0}, + {"Rusage.Majflt", Field, 0}, + {"Rusage.Maxrss", Field, 0}, + {"Rusage.Minflt", Field, 0}, + {"Rusage.Msgrcv", Field, 0}, + {"Rusage.Msgsnd", Field, 0}, + {"Rusage.Nivcsw", Field, 0}, + {"Rusage.Nsignals", Field, 0}, + {"Rusage.Nswap", Field, 0}, + {"Rusage.Nvcsw", Field, 0}, + {"Rusage.Oublock", Field, 0}, + {"Rusage.Stime", Field, 0}, + {"Rusage.UserTime", Field, 0}, + {"Rusage.Utime", Field, 0}, + {"SCM_BINTIME", Const, 0}, + {"SCM_CREDENTIALS", Const, 0}, + {"SCM_CREDS", Const, 0}, + {"SCM_RIGHTS", Const, 0}, + {"SCM_TIMESTAMP", Const, 0}, + {"SCM_TIMESTAMPING", Const, 0}, + {"SCM_TIMESTAMPNS", Const, 0}, + {"SCM_TIMESTAMP_MONOTONIC", Const, 0}, + {"SHUT_RD", Const, 0}, + {"SHUT_RDWR", Const, 0}, + {"SHUT_WR", Const, 0}, + {"SID", Type, 0}, + {"SIDAndAttributes", Type, 0}, + {"SIDAndAttributes.Attributes", Field, 0}, + {"SIDAndAttributes.Sid", Field, 0}, + {"SIGABRT", Const, 0}, + {"SIGALRM", Const, 0}, + {"SIGBUS", Const, 0}, + {"SIGCHLD", Const, 0}, + {"SIGCLD", Const, 0}, + {"SIGCONT", Const, 0}, + {"SIGEMT", Const, 0}, + {"SIGFPE", Const, 0}, + {"SIGHUP", Const, 0}, + {"SIGILL", Const, 0}, + {"SIGINFO", Const, 0}, + {"SIGINT", Const, 0}, + {"SIGIO", Const, 0}, + {"SIGIOT", Const, 0}, + {"SIGKILL", Const, 0}, + {"SIGLIBRT", Const, 1}, + {"SIGLWP", Const, 0}, + {"SIGPIPE", Const, 0}, + {"SIGPOLL", Const, 0}, + {"SIGPROF", Const, 0}, + {"SIGPWR", Const, 0}, + {"SIGQUIT", Const, 0}, + {"SIGSEGV", Const, 0}, + {"SIGSTKFLT", Const, 0}, + {"SIGSTOP", Const, 0}, + {"SIGSYS", Const, 0}, + {"SIGTERM", Const, 0}, + {"SIGTHR", Const, 0}, + {"SIGTRAP", Const, 0}, + {"SIGTSTP", Const, 0}, + {"SIGTTIN", Const, 0}, + {"SIGTTOU", Const, 0}, + {"SIGUNUSED", Const, 0}, + {"SIGURG", Const, 0}, + {"SIGUSR1", Const, 0}, + {"SIGUSR2", Const, 0}, + {"SIGVTALRM", Const, 0}, + {"SIGWINCH", Const, 0}, + {"SIGXCPU", Const, 0}, + {"SIGXFSZ", Const, 0}, + {"SIOCADDDLCI", Const, 0}, + {"SIOCADDMULTI", Const, 0}, + {"SIOCADDRT", Const, 0}, + {"SIOCAIFADDR", Const, 0}, + {"SIOCAIFGROUP", Const, 0}, + {"SIOCALIFADDR", Const, 0}, + {"SIOCARPIPLL", Const, 0}, + {"SIOCATMARK", Const, 0}, + {"SIOCAUTOADDR", Const, 0}, + {"SIOCAUTONETMASK", Const, 0}, + {"SIOCBRDGADD", Const, 1}, + {"SIOCBRDGADDS", Const, 1}, + {"SIOCBRDGARL", Const, 1}, + {"SIOCBRDGDADDR", Const, 1}, + {"SIOCBRDGDEL", Const, 1}, + {"SIOCBRDGDELS", Const, 1}, + {"SIOCBRDGFLUSH", Const, 1}, + {"SIOCBRDGFRL", Const, 1}, + {"SIOCBRDGGCACHE", Const, 1}, + {"SIOCBRDGGFD", Const, 1}, + {"SIOCBRDGGHT", Const, 1}, + {"SIOCBRDGGIFFLGS", Const, 1}, + {"SIOCBRDGGMA", Const, 1}, + {"SIOCBRDGGPARAM", Const, 1}, + {"SIOCBRDGGPRI", Const, 1}, + {"SIOCBRDGGRL", Const, 1}, + {"SIOCBRDGGSIFS", Const, 1}, + {"SIOCBRDGGTO", Const, 1}, + {"SIOCBRDGIFS", Const, 1}, + {"SIOCBRDGRTS", Const, 1}, + {"SIOCBRDGSADDR", Const, 1}, + {"SIOCBRDGSCACHE", Const, 1}, + {"SIOCBRDGSFD", Const, 1}, + {"SIOCBRDGSHT", Const, 1}, + {"SIOCBRDGSIFCOST", Const, 1}, + {"SIOCBRDGSIFFLGS", Const, 1}, + {"SIOCBRDGSIFPRIO", Const, 1}, + {"SIOCBRDGSMA", Const, 1}, + {"SIOCBRDGSPRI", Const, 1}, + {"SIOCBRDGSPROTO", Const, 1}, + {"SIOCBRDGSTO", Const, 1}, + {"SIOCBRDGSTXHC", Const, 1}, + {"SIOCDARP", Const, 0}, + {"SIOCDELDLCI", Const, 0}, + {"SIOCDELMULTI", Const, 0}, + {"SIOCDELRT", Const, 0}, + {"SIOCDEVPRIVATE", Const, 0}, + {"SIOCDIFADDR", Const, 0}, + {"SIOCDIFGROUP", Const, 0}, + {"SIOCDIFPHYADDR", Const, 0}, + {"SIOCDLIFADDR", Const, 0}, + {"SIOCDRARP", Const, 0}, + {"SIOCGARP", Const, 0}, + {"SIOCGDRVSPEC", Const, 0}, + {"SIOCGETKALIVE", Const, 1}, + 
{"SIOCGETLABEL", Const, 1}, + {"SIOCGETPFLOW", Const, 1}, + {"SIOCGETPFSYNC", Const, 1}, + {"SIOCGETSGCNT", Const, 0}, + {"SIOCGETVIFCNT", Const, 0}, + {"SIOCGETVLAN", Const, 0}, + {"SIOCGHIWAT", Const, 0}, + {"SIOCGIFADDR", Const, 0}, + {"SIOCGIFADDRPREF", Const, 1}, + {"SIOCGIFALIAS", Const, 1}, + {"SIOCGIFALTMTU", Const, 0}, + {"SIOCGIFASYNCMAP", Const, 0}, + {"SIOCGIFBOND", Const, 0}, + {"SIOCGIFBR", Const, 0}, + {"SIOCGIFBRDADDR", Const, 0}, + {"SIOCGIFCAP", Const, 0}, + {"SIOCGIFCONF", Const, 0}, + {"SIOCGIFCOUNT", Const, 0}, + {"SIOCGIFDATA", Const, 1}, + {"SIOCGIFDESCR", Const, 0}, + {"SIOCGIFDEVMTU", Const, 0}, + {"SIOCGIFDLT", Const, 1}, + {"SIOCGIFDSTADDR", Const, 0}, + {"SIOCGIFENCAP", Const, 0}, + {"SIOCGIFFIB", Const, 1}, + {"SIOCGIFFLAGS", Const, 0}, + {"SIOCGIFGATTR", Const, 1}, + {"SIOCGIFGENERIC", Const, 0}, + {"SIOCGIFGMEMB", Const, 0}, + {"SIOCGIFGROUP", Const, 0}, + {"SIOCGIFHARDMTU", Const, 3}, + {"SIOCGIFHWADDR", Const, 0}, + {"SIOCGIFINDEX", Const, 0}, + {"SIOCGIFKPI", Const, 0}, + {"SIOCGIFMAC", Const, 0}, + {"SIOCGIFMAP", Const, 0}, + {"SIOCGIFMEDIA", Const, 0}, + {"SIOCGIFMEM", Const, 0}, + {"SIOCGIFMETRIC", Const, 0}, + {"SIOCGIFMTU", Const, 0}, + {"SIOCGIFNAME", Const, 0}, + {"SIOCGIFNETMASK", Const, 0}, + {"SIOCGIFPDSTADDR", Const, 0}, + {"SIOCGIFPFLAGS", Const, 0}, + {"SIOCGIFPHYS", Const, 0}, + {"SIOCGIFPRIORITY", Const, 1}, + {"SIOCGIFPSRCADDR", Const, 0}, + {"SIOCGIFRDOMAIN", Const, 1}, + {"SIOCGIFRTLABEL", Const, 1}, + {"SIOCGIFSLAVE", Const, 0}, + {"SIOCGIFSTATUS", Const, 0}, + {"SIOCGIFTIMESLOT", Const, 1}, + {"SIOCGIFTXQLEN", Const, 0}, + {"SIOCGIFVLAN", Const, 0}, + {"SIOCGIFWAKEFLAGS", Const, 0}, + {"SIOCGIFXFLAGS", Const, 1}, + {"SIOCGLIFADDR", Const, 0}, + {"SIOCGLIFPHYADDR", Const, 0}, + {"SIOCGLIFPHYRTABLE", Const, 1}, + {"SIOCGLIFPHYTTL", Const, 3}, + {"SIOCGLINKSTR", Const, 1}, + {"SIOCGLOWAT", Const, 0}, + {"SIOCGPGRP", Const, 0}, + {"SIOCGPRIVATE_0", Const, 0}, + {"SIOCGPRIVATE_1", Const, 0}, + {"SIOCGRARP", Const, 0}, + {"SIOCGSPPPPARAMS", Const, 3}, + {"SIOCGSTAMP", Const, 0}, + {"SIOCGSTAMPNS", Const, 0}, + {"SIOCGVH", Const, 1}, + {"SIOCGVNETID", Const, 3}, + {"SIOCIFCREATE", Const, 0}, + {"SIOCIFCREATE2", Const, 0}, + {"SIOCIFDESTROY", Const, 0}, + {"SIOCIFGCLONERS", Const, 0}, + {"SIOCINITIFADDR", Const, 1}, + {"SIOCPROTOPRIVATE", Const, 0}, + {"SIOCRSLVMULTI", Const, 0}, + {"SIOCRTMSG", Const, 0}, + {"SIOCSARP", Const, 0}, + {"SIOCSDRVSPEC", Const, 0}, + {"SIOCSETKALIVE", Const, 1}, + {"SIOCSETLABEL", Const, 1}, + {"SIOCSETPFLOW", Const, 1}, + {"SIOCSETPFSYNC", Const, 1}, + {"SIOCSETVLAN", Const, 0}, + {"SIOCSHIWAT", Const, 0}, + {"SIOCSIFADDR", Const, 0}, + {"SIOCSIFADDRPREF", Const, 1}, + {"SIOCSIFALTMTU", Const, 0}, + {"SIOCSIFASYNCMAP", Const, 0}, + {"SIOCSIFBOND", Const, 0}, + {"SIOCSIFBR", Const, 0}, + {"SIOCSIFBRDADDR", Const, 0}, + {"SIOCSIFCAP", Const, 0}, + {"SIOCSIFDESCR", Const, 0}, + {"SIOCSIFDSTADDR", Const, 0}, + {"SIOCSIFENCAP", Const, 0}, + {"SIOCSIFFIB", Const, 1}, + {"SIOCSIFFLAGS", Const, 0}, + {"SIOCSIFGATTR", Const, 1}, + {"SIOCSIFGENERIC", Const, 0}, + {"SIOCSIFHWADDR", Const, 0}, + {"SIOCSIFHWBROADCAST", Const, 0}, + {"SIOCSIFKPI", Const, 0}, + {"SIOCSIFLINK", Const, 0}, + {"SIOCSIFLLADDR", Const, 0}, + {"SIOCSIFMAC", Const, 0}, + {"SIOCSIFMAP", Const, 0}, + {"SIOCSIFMEDIA", Const, 0}, + {"SIOCSIFMEM", Const, 0}, + {"SIOCSIFMETRIC", Const, 0}, + {"SIOCSIFMTU", Const, 0}, + {"SIOCSIFNAME", Const, 0}, + {"SIOCSIFNETMASK", Const, 0}, + {"SIOCSIFPFLAGS", Const, 0}, + {"SIOCSIFPHYADDR", Const, 0}, + {"SIOCSIFPHYS", 
Const, 0}, + {"SIOCSIFPRIORITY", Const, 1}, + {"SIOCSIFRDOMAIN", Const, 1}, + {"SIOCSIFRTLABEL", Const, 1}, + {"SIOCSIFRVNET", Const, 0}, + {"SIOCSIFSLAVE", Const, 0}, + {"SIOCSIFTIMESLOT", Const, 1}, + {"SIOCSIFTXQLEN", Const, 0}, + {"SIOCSIFVLAN", Const, 0}, + {"SIOCSIFVNET", Const, 0}, + {"SIOCSIFXFLAGS", Const, 1}, + {"SIOCSLIFPHYADDR", Const, 0}, + {"SIOCSLIFPHYRTABLE", Const, 1}, + {"SIOCSLIFPHYTTL", Const, 3}, + {"SIOCSLINKSTR", Const, 1}, + {"SIOCSLOWAT", Const, 0}, + {"SIOCSPGRP", Const, 0}, + {"SIOCSRARP", Const, 0}, + {"SIOCSSPPPPARAMS", Const, 3}, + {"SIOCSVH", Const, 1}, + {"SIOCSVNETID", Const, 3}, + {"SIOCZIFDATA", Const, 1}, + {"SIO_GET_EXTENSION_FUNCTION_POINTER", Const, 1}, + {"SIO_GET_INTERFACE_LIST", Const, 0}, + {"SIO_KEEPALIVE_VALS", Const, 3}, + {"SIO_UDP_CONNRESET", Const, 4}, + {"SOCK_CLOEXEC", Const, 0}, + {"SOCK_DCCP", Const, 0}, + {"SOCK_DGRAM", Const, 0}, + {"SOCK_FLAGS_MASK", Const, 1}, + {"SOCK_MAXADDRLEN", Const, 0}, + {"SOCK_NONBLOCK", Const, 0}, + {"SOCK_NOSIGPIPE", Const, 1}, + {"SOCK_PACKET", Const, 0}, + {"SOCK_RAW", Const, 0}, + {"SOCK_RDM", Const, 0}, + {"SOCK_SEQPACKET", Const, 0}, + {"SOCK_STREAM", Const, 0}, + {"SOL_AAL", Const, 0}, + {"SOL_ATM", Const, 0}, + {"SOL_DECNET", Const, 0}, + {"SOL_ICMPV6", Const, 0}, + {"SOL_IP", Const, 0}, + {"SOL_IPV6", Const, 0}, + {"SOL_IRDA", Const, 0}, + {"SOL_PACKET", Const, 0}, + {"SOL_RAW", Const, 0}, + {"SOL_SOCKET", Const, 0}, + {"SOL_TCP", Const, 0}, + {"SOL_X25", Const, 0}, + {"SOMAXCONN", Const, 0}, + {"SO_ACCEPTCONN", Const, 0}, + {"SO_ACCEPTFILTER", Const, 0}, + {"SO_ATTACH_FILTER", Const, 0}, + {"SO_BINDANY", Const, 1}, + {"SO_BINDTODEVICE", Const, 0}, + {"SO_BINTIME", Const, 0}, + {"SO_BROADCAST", Const, 0}, + {"SO_BSDCOMPAT", Const, 0}, + {"SO_DEBUG", Const, 0}, + {"SO_DETACH_FILTER", Const, 0}, + {"SO_DOMAIN", Const, 0}, + {"SO_DONTROUTE", Const, 0}, + {"SO_DONTTRUNC", Const, 0}, + {"SO_ERROR", Const, 0}, + {"SO_KEEPALIVE", Const, 0}, + {"SO_LABEL", Const, 0}, + {"SO_LINGER", Const, 0}, + {"SO_LINGER_SEC", Const, 0}, + {"SO_LISTENINCQLEN", Const, 0}, + {"SO_LISTENQLEN", Const, 0}, + {"SO_LISTENQLIMIT", Const, 0}, + {"SO_MARK", Const, 0}, + {"SO_NETPROC", Const, 1}, + {"SO_NKE", Const, 0}, + {"SO_NOADDRERR", Const, 0}, + {"SO_NOHEADER", Const, 1}, + {"SO_NOSIGPIPE", Const, 0}, + {"SO_NOTIFYCONFLICT", Const, 0}, + {"SO_NO_CHECK", Const, 0}, + {"SO_NO_DDP", Const, 0}, + {"SO_NO_OFFLOAD", Const, 0}, + {"SO_NP_EXTENSIONS", Const, 0}, + {"SO_NREAD", Const, 0}, + {"SO_NUMRCVPKT", Const, 16}, + {"SO_NWRITE", Const, 0}, + {"SO_OOBINLINE", Const, 0}, + {"SO_OVERFLOWED", Const, 1}, + {"SO_PASSCRED", Const, 0}, + {"SO_PASSSEC", Const, 0}, + {"SO_PEERCRED", Const, 0}, + {"SO_PEERLABEL", Const, 0}, + {"SO_PEERNAME", Const, 0}, + {"SO_PEERSEC", Const, 0}, + {"SO_PRIORITY", Const, 0}, + {"SO_PROTOCOL", Const, 0}, + {"SO_PROTOTYPE", Const, 1}, + {"SO_RANDOMPORT", Const, 0}, + {"SO_RCVBUF", Const, 0}, + {"SO_RCVBUFFORCE", Const, 0}, + {"SO_RCVLOWAT", Const, 0}, + {"SO_RCVTIMEO", Const, 0}, + {"SO_RESTRICTIONS", Const, 0}, + {"SO_RESTRICT_DENYIN", Const, 0}, + {"SO_RESTRICT_DENYOUT", Const, 0}, + {"SO_RESTRICT_DENYSET", Const, 0}, + {"SO_REUSEADDR", Const, 0}, + {"SO_REUSEPORT", Const, 0}, + {"SO_REUSESHAREUID", Const, 0}, + {"SO_RTABLE", Const, 1}, + {"SO_RXQ_OVFL", Const, 0}, + {"SO_SECURITY_AUTHENTICATION", Const, 0}, + {"SO_SECURITY_ENCRYPTION_NETWORK", Const, 0}, + {"SO_SECURITY_ENCRYPTION_TRANSPORT", Const, 0}, + {"SO_SETFIB", Const, 0}, + {"SO_SNDBUF", Const, 0}, + {"SO_SNDBUFFORCE", Const, 0}, + {"SO_SNDLOWAT", 
Const, 0}, + {"SO_SNDTIMEO", Const, 0}, + {"SO_SPLICE", Const, 1}, + {"SO_TIMESTAMP", Const, 0}, + {"SO_TIMESTAMPING", Const, 0}, + {"SO_TIMESTAMPNS", Const, 0}, + {"SO_TIMESTAMP_MONOTONIC", Const, 0}, + {"SO_TYPE", Const, 0}, + {"SO_UPCALLCLOSEWAIT", Const, 0}, + {"SO_UPDATE_ACCEPT_CONTEXT", Const, 0}, + {"SO_UPDATE_CONNECT_CONTEXT", Const, 1}, + {"SO_USELOOPBACK", Const, 0}, + {"SO_USER_COOKIE", Const, 1}, + {"SO_VENDOR", Const, 3}, + {"SO_WANTMORE", Const, 0}, + {"SO_WANTOOBFLAG", Const, 0}, + {"SSLExtraCertChainPolicyPara", Type, 0}, + {"SSLExtraCertChainPolicyPara.AuthType", Field, 0}, + {"SSLExtraCertChainPolicyPara.Checks", Field, 0}, + {"SSLExtraCertChainPolicyPara.ServerName", Field, 0}, + {"SSLExtraCertChainPolicyPara.Size", Field, 0}, + {"STANDARD_RIGHTS_ALL", Const, 0}, + {"STANDARD_RIGHTS_EXECUTE", Const, 0}, + {"STANDARD_RIGHTS_READ", Const, 0}, + {"STANDARD_RIGHTS_REQUIRED", Const, 0}, + {"STANDARD_RIGHTS_WRITE", Const, 0}, + {"STARTF_USESHOWWINDOW", Const, 0}, + {"STARTF_USESTDHANDLES", Const, 0}, + {"STD_ERROR_HANDLE", Const, 0}, + {"STD_INPUT_HANDLE", Const, 0}, + {"STD_OUTPUT_HANDLE", Const, 0}, + {"SUBLANG_ENGLISH_US", Const, 0}, + {"SW_FORCEMINIMIZE", Const, 0}, + {"SW_HIDE", Const, 0}, + {"SW_MAXIMIZE", Const, 0}, + {"SW_MINIMIZE", Const, 0}, + {"SW_NORMAL", Const, 0}, + {"SW_RESTORE", Const, 0}, + {"SW_SHOW", Const, 0}, + {"SW_SHOWDEFAULT", Const, 0}, + {"SW_SHOWMAXIMIZED", Const, 0}, + {"SW_SHOWMINIMIZED", Const, 0}, + {"SW_SHOWMINNOACTIVE", Const, 0}, + {"SW_SHOWNA", Const, 0}, + {"SW_SHOWNOACTIVATE", Const, 0}, + {"SW_SHOWNORMAL", Const, 0}, + {"SYMBOLIC_LINK_FLAG_DIRECTORY", Const, 4}, + {"SYNCHRONIZE", Const, 0}, + {"SYSCTL_VERSION", Const, 1}, + {"SYSCTL_VERS_0", Const, 1}, + {"SYSCTL_VERS_1", Const, 1}, + {"SYSCTL_VERS_MASK", Const, 1}, + {"SYS_ABORT2", Const, 0}, + {"SYS_ACCEPT", Const, 0}, + {"SYS_ACCEPT4", Const, 0}, + {"SYS_ACCEPT_NOCANCEL", Const, 0}, + {"SYS_ACCESS", Const, 0}, + {"SYS_ACCESS_EXTENDED", Const, 0}, + {"SYS_ACCT", Const, 0}, + {"SYS_ADD_KEY", Const, 0}, + {"SYS_ADD_PROFIL", Const, 0}, + {"SYS_ADJFREQ", Const, 1}, + {"SYS_ADJTIME", Const, 0}, + {"SYS_ADJTIMEX", Const, 0}, + {"SYS_AFS_SYSCALL", Const, 0}, + {"SYS_AIO_CANCEL", Const, 0}, + {"SYS_AIO_ERROR", Const, 0}, + {"SYS_AIO_FSYNC", Const, 0}, + {"SYS_AIO_MLOCK", Const, 14}, + {"SYS_AIO_READ", Const, 0}, + {"SYS_AIO_RETURN", Const, 0}, + {"SYS_AIO_SUSPEND", Const, 0}, + {"SYS_AIO_SUSPEND_NOCANCEL", Const, 0}, + {"SYS_AIO_WAITCOMPLETE", Const, 14}, + {"SYS_AIO_WRITE", Const, 0}, + {"SYS_ALARM", Const, 0}, + {"SYS_ARCH_PRCTL", Const, 0}, + {"SYS_ARM_FADVISE64_64", Const, 0}, + {"SYS_ARM_SYNC_FILE_RANGE", Const, 0}, + {"SYS_ATGETMSG", Const, 0}, + {"SYS_ATPGETREQ", Const, 0}, + {"SYS_ATPGETRSP", Const, 0}, + {"SYS_ATPSNDREQ", Const, 0}, + {"SYS_ATPSNDRSP", Const, 0}, + {"SYS_ATPUTMSG", Const, 0}, + {"SYS_ATSOCKET", Const, 0}, + {"SYS_AUDIT", Const, 0}, + {"SYS_AUDITCTL", Const, 0}, + {"SYS_AUDITON", Const, 0}, + {"SYS_AUDIT_SESSION_JOIN", Const, 0}, + {"SYS_AUDIT_SESSION_PORT", Const, 0}, + {"SYS_AUDIT_SESSION_SELF", Const, 0}, + {"SYS_BDFLUSH", Const, 0}, + {"SYS_BIND", Const, 0}, + {"SYS_BINDAT", Const, 3}, + {"SYS_BREAK", Const, 0}, + {"SYS_BRK", Const, 0}, + {"SYS_BSDTHREAD_CREATE", Const, 0}, + {"SYS_BSDTHREAD_REGISTER", Const, 0}, + {"SYS_BSDTHREAD_TERMINATE", Const, 0}, + {"SYS_CAPGET", Const, 0}, + {"SYS_CAPSET", Const, 0}, + {"SYS_CAP_ENTER", Const, 0}, + {"SYS_CAP_FCNTLS_GET", Const, 1}, + {"SYS_CAP_FCNTLS_LIMIT", Const, 1}, + {"SYS_CAP_GETMODE", Const, 0}, + {"SYS_CAP_GETRIGHTS", 
Const, 0}, + {"SYS_CAP_IOCTLS_GET", Const, 1}, + {"SYS_CAP_IOCTLS_LIMIT", Const, 1}, + {"SYS_CAP_NEW", Const, 0}, + {"SYS_CAP_RIGHTS_GET", Const, 1}, + {"SYS_CAP_RIGHTS_LIMIT", Const, 1}, + {"SYS_CHDIR", Const, 0}, + {"SYS_CHFLAGS", Const, 0}, + {"SYS_CHFLAGSAT", Const, 3}, + {"SYS_CHMOD", Const, 0}, + {"SYS_CHMOD_EXTENDED", Const, 0}, + {"SYS_CHOWN", Const, 0}, + {"SYS_CHOWN32", Const, 0}, + {"SYS_CHROOT", Const, 0}, + {"SYS_CHUD", Const, 0}, + {"SYS_CLOCK_ADJTIME", Const, 0}, + {"SYS_CLOCK_GETCPUCLOCKID2", Const, 1}, + {"SYS_CLOCK_GETRES", Const, 0}, + {"SYS_CLOCK_GETTIME", Const, 0}, + {"SYS_CLOCK_NANOSLEEP", Const, 0}, + {"SYS_CLOCK_SETTIME", Const, 0}, + {"SYS_CLONE", Const, 0}, + {"SYS_CLOSE", Const, 0}, + {"SYS_CLOSEFROM", Const, 0}, + {"SYS_CLOSE_NOCANCEL", Const, 0}, + {"SYS_CONNECT", Const, 0}, + {"SYS_CONNECTAT", Const, 3}, + {"SYS_CONNECT_NOCANCEL", Const, 0}, + {"SYS_COPYFILE", Const, 0}, + {"SYS_CPUSET", Const, 0}, + {"SYS_CPUSET_GETAFFINITY", Const, 0}, + {"SYS_CPUSET_GETID", Const, 0}, + {"SYS_CPUSET_SETAFFINITY", Const, 0}, + {"SYS_CPUSET_SETID", Const, 0}, + {"SYS_CREAT", Const, 0}, + {"SYS_CREATE_MODULE", Const, 0}, + {"SYS_CSOPS", Const, 0}, + {"SYS_CSOPS_AUDITTOKEN", Const, 16}, + {"SYS_DELETE", Const, 0}, + {"SYS_DELETE_MODULE", Const, 0}, + {"SYS_DUP", Const, 0}, + {"SYS_DUP2", Const, 0}, + {"SYS_DUP3", Const, 0}, + {"SYS_EACCESS", Const, 0}, + {"SYS_EPOLL_CREATE", Const, 0}, + {"SYS_EPOLL_CREATE1", Const, 0}, + {"SYS_EPOLL_CTL", Const, 0}, + {"SYS_EPOLL_CTL_OLD", Const, 0}, + {"SYS_EPOLL_PWAIT", Const, 0}, + {"SYS_EPOLL_WAIT", Const, 0}, + {"SYS_EPOLL_WAIT_OLD", Const, 0}, + {"SYS_EVENTFD", Const, 0}, + {"SYS_EVENTFD2", Const, 0}, + {"SYS_EXCHANGEDATA", Const, 0}, + {"SYS_EXECVE", Const, 0}, + {"SYS_EXIT", Const, 0}, + {"SYS_EXIT_GROUP", Const, 0}, + {"SYS_EXTATTRCTL", Const, 0}, + {"SYS_EXTATTR_DELETE_FD", Const, 0}, + {"SYS_EXTATTR_DELETE_FILE", Const, 0}, + {"SYS_EXTATTR_DELETE_LINK", Const, 0}, + {"SYS_EXTATTR_GET_FD", Const, 0}, + {"SYS_EXTATTR_GET_FILE", Const, 0}, + {"SYS_EXTATTR_GET_LINK", Const, 0}, + {"SYS_EXTATTR_LIST_FD", Const, 0}, + {"SYS_EXTATTR_LIST_FILE", Const, 0}, + {"SYS_EXTATTR_LIST_LINK", Const, 0}, + {"SYS_EXTATTR_SET_FD", Const, 0}, + {"SYS_EXTATTR_SET_FILE", Const, 0}, + {"SYS_EXTATTR_SET_LINK", Const, 0}, + {"SYS_FACCESSAT", Const, 0}, + {"SYS_FADVISE64", Const, 0}, + {"SYS_FADVISE64_64", Const, 0}, + {"SYS_FALLOCATE", Const, 0}, + {"SYS_FANOTIFY_INIT", Const, 0}, + {"SYS_FANOTIFY_MARK", Const, 0}, + {"SYS_FCHDIR", Const, 0}, + {"SYS_FCHFLAGS", Const, 0}, + {"SYS_FCHMOD", Const, 0}, + {"SYS_FCHMODAT", Const, 0}, + {"SYS_FCHMOD_EXTENDED", Const, 0}, + {"SYS_FCHOWN", Const, 0}, + {"SYS_FCHOWN32", Const, 0}, + {"SYS_FCHOWNAT", Const, 0}, + {"SYS_FCHROOT", Const, 1}, + {"SYS_FCNTL", Const, 0}, + {"SYS_FCNTL64", Const, 0}, + {"SYS_FCNTL_NOCANCEL", Const, 0}, + {"SYS_FDATASYNC", Const, 0}, + {"SYS_FEXECVE", Const, 0}, + {"SYS_FFCLOCK_GETCOUNTER", Const, 0}, + {"SYS_FFCLOCK_GETESTIMATE", Const, 0}, + {"SYS_FFCLOCK_SETESTIMATE", Const, 0}, + {"SYS_FFSCTL", Const, 0}, + {"SYS_FGETATTRLIST", Const, 0}, + {"SYS_FGETXATTR", Const, 0}, + {"SYS_FHOPEN", Const, 0}, + {"SYS_FHSTAT", Const, 0}, + {"SYS_FHSTATFS", Const, 0}, + {"SYS_FILEPORT_MAKEFD", Const, 0}, + {"SYS_FILEPORT_MAKEPORT", Const, 0}, + {"SYS_FKTRACE", Const, 1}, + {"SYS_FLISTXATTR", Const, 0}, + {"SYS_FLOCK", Const, 0}, + {"SYS_FORK", Const, 0}, + {"SYS_FPATHCONF", Const, 0}, + {"SYS_FREEBSD6_FTRUNCATE", Const, 0}, + {"SYS_FREEBSD6_LSEEK", Const, 0}, + {"SYS_FREEBSD6_MMAP", Const, 0}, + 
{"SYS_FREEBSD6_PREAD", Const, 0}, + {"SYS_FREEBSD6_PWRITE", Const, 0}, + {"SYS_FREEBSD6_TRUNCATE", Const, 0}, + {"SYS_FREMOVEXATTR", Const, 0}, + {"SYS_FSCTL", Const, 0}, + {"SYS_FSETATTRLIST", Const, 0}, + {"SYS_FSETXATTR", Const, 0}, + {"SYS_FSGETPATH", Const, 0}, + {"SYS_FSTAT", Const, 0}, + {"SYS_FSTAT64", Const, 0}, + {"SYS_FSTAT64_EXTENDED", Const, 0}, + {"SYS_FSTATAT", Const, 0}, + {"SYS_FSTATAT64", Const, 0}, + {"SYS_FSTATFS", Const, 0}, + {"SYS_FSTATFS64", Const, 0}, + {"SYS_FSTATV", Const, 0}, + {"SYS_FSTATVFS1", Const, 1}, + {"SYS_FSTAT_EXTENDED", Const, 0}, + {"SYS_FSYNC", Const, 0}, + {"SYS_FSYNC_NOCANCEL", Const, 0}, + {"SYS_FSYNC_RANGE", Const, 1}, + {"SYS_FTIME", Const, 0}, + {"SYS_FTRUNCATE", Const, 0}, + {"SYS_FTRUNCATE64", Const, 0}, + {"SYS_FUTEX", Const, 0}, + {"SYS_FUTIMENS", Const, 1}, + {"SYS_FUTIMES", Const, 0}, + {"SYS_FUTIMESAT", Const, 0}, + {"SYS_GETATTRLIST", Const, 0}, + {"SYS_GETAUDIT", Const, 0}, + {"SYS_GETAUDIT_ADDR", Const, 0}, + {"SYS_GETAUID", Const, 0}, + {"SYS_GETCONTEXT", Const, 0}, + {"SYS_GETCPU", Const, 0}, + {"SYS_GETCWD", Const, 0}, + {"SYS_GETDENTS", Const, 0}, + {"SYS_GETDENTS64", Const, 0}, + {"SYS_GETDIRENTRIES", Const, 0}, + {"SYS_GETDIRENTRIES64", Const, 0}, + {"SYS_GETDIRENTRIESATTR", Const, 0}, + {"SYS_GETDTABLECOUNT", Const, 1}, + {"SYS_GETDTABLESIZE", Const, 0}, + {"SYS_GETEGID", Const, 0}, + {"SYS_GETEGID32", Const, 0}, + {"SYS_GETEUID", Const, 0}, + {"SYS_GETEUID32", Const, 0}, + {"SYS_GETFH", Const, 0}, + {"SYS_GETFSSTAT", Const, 0}, + {"SYS_GETFSSTAT64", Const, 0}, + {"SYS_GETGID", Const, 0}, + {"SYS_GETGID32", Const, 0}, + {"SYS_GETGROUPS", Const, 0}, + {"SYS_GETGROUPS32", Const, 0}, + {"SYS_GETHOSTUUID", Const, 0}, + {"SYS_GETITIMER", Const, 0}, + {"SYS_GETLCID", Const, 0}, + {"SYS_GETLOGIN", Const, 0}, + {"SYS_GETLOGINCLASS", Const, 0}, + {"SYS_GETPEERNAME", Const, 0}, + {"SYS_GETPGID", Const, 0}, + {"SYS_GETPGRP", Const, 0}, + {"SYS_GETPID", Const, 0}, + {"SYS_GETPMSG", Const, 0}, + {"SYS_GETPPID", Const, 0}, + {"SYS_GETPRIORITY", Const, 0}, + {"SYS_GETRESGID", Const, 0}, + {"SYS_GETRESGID32", Const, 0}, + {"SYS_GETRESUID", Const, 0}, + {"SYS_GETRESUID32", Const, 0}, + {"SYS_GETRLIMIT", Const, 0}, + {"SYS_GETRTABLE", Const, 1}, + {"SYS_GETRUSAGE", Const, 0}, + {"SYS_GETSGROUPS", Const, 0}, + {"SYS_GETSID", Const, 0}, + {"SYS_GETSOCKNAME", Const, 0}, + {"SYS_GETSOCKOPT", Const, 0}, + {"SYS_GETTHRID", Const, 1}, + {"SYS_GETTID", Const, 0}, + {"SYS_GETTIMEOFDAY", Const, 0}, + {"SYS_GETUID", Const, 0}, + {"SYS_GETUID32", Const, 0}, + {"SYS_GETVFSSTAT", Const, 1}, + {"SYS_GETWGROUPS", Const, 0}, + {"SYS_GETXATTR", Const, 0}, + {"SYS_GET_KERNEL_SYMS", Const, 0}, + {"SYS_GET_MEMPOLICY", Const, 0}, + {"SYS_GET_ROBUST_LIST", Const, 0}, + {"SYS_GET_THREAD_AREA", Const, 0}, + {"SYS_GSSD_SYSCALL", Const, 14}, + {"SYS_GTTY", Const, 0}, + {"SYS_IDENTITYSVC", Const, 0}, + {"SYS_IDLE", Const, 0}, + {"SYS_INITGROUPS", Const, 0}, + {"SYS_INIT_MODULE", Const, 0}, + {"SYS_INOTIFY_ADD_WATCH", Const, 0}, + {"SYS_INOTIFY_INIT", Const, 0}, + {"SYS_INOTIFY_INIT1", Const, 0}, + {"SYS_INOTIFY_RM_WATCH", Const, 0}, + {"SYS_IOCTL", Const, 0}, + {"SYS_IOPERM", Const, 0}, + {"SYS_IOPL", Const, 0}, + {"SYS_IOPOLICYSYS", Const, 0}, + {"SYS_IOPRIO_GET", Const, 0}, + {"SYS_IOPRIO_SET", Const, 0}, + {"SYS_IO_CANCEL", Const, 0}, + {"SYS_IO_DESTROY", Const, 0}, + {"SYS_IO_GETEVENTS", Const, 0}, + {"SYS_IO_SETUP", Const, 0}, + {"SYS_IO_SUBMIT", Const, 0}, + {"SYS_IPC", Const, 0}, + {"SYS_ISSETUGID", Const, 0}, + {"SYS_JAIL", Const, 0}, + {"SYS_JAIL_ATTACH", Const, 
0}, + {"SYS_JAIL_GET", Const, 0}, + {"SYS_JAIL_REMOVE", Const, 0}, + {"SYS_JAIL_SET", Const, 0}, + {"SYS_KAS_INFO", Const, 16}, + {"SYS_KDEBUG_TRACE", Const, 0}, + {"SYS_KENV", Const, 0}, + {"SYS_KEVENT", Const, 0}, + {"SYS_KEVENT64", Const, 0}, + {"SYS_KEXEC_LOAD", Const, 0}, + {"SYS_KEYCTL", Const, 0}, + {"SYS_KILL", Const, 0}, + {"SYS_KLDFIND", Const, 0}, + {"SYS_KLDFIRSTMOD", Const, 0}, + {"SYS_KLDLOAD", Const, 0}, + {"SYS_KLDNEXT", Const, 0}, + {"SYS_KLDSTAT", Const, 0}, + {"SYS_KLDSYM", Const, 0}, + {"SYS_KLDUNLOAD", Const, 0}, + {"SYS_KLDUNLOADF", Const, 0}, + {"SYS_KMQ_NOTIFY", Const, 14}, + {"SYS_KMQ_OPEN", Const, 14}, + {"SYS_KMQ_SETATTR", Const, 14}, + {"SYS_KMQ_TIMEDRECEIVE", Const, 14}, + {"SYS_KMQ_TIMEDSEND", Const, 14}, + {"SYS_KMQ_UNLINK", Const, 14}, + {"SYS_KQUEUE", Const, 0}, + {"SYS_KQUEUE1", Const, 1}, + {"SYS_KSEM_CLOSE", Const, 14}, + {"SYS_KSEM_DESTROY", Const, 14}, + {"SYS_KSEM_GETVALUE", Const, 14}, + {"SYS_KSEM_INIT", Const, 14}, + {"SYS_KSEM_OPEN", Const, 14}, + {"SYS_KSEM_POST", Const, 14}, + {"SYS_KSEM_TIMEDWAIT", Const, 14}, + {"SYS_KSEM_TRYWAIT", Const, 14}, + {"SYS_KSEM_UNLINK", Const, 14}, + {"SYS_KSEM_WAIT", Const, 14}, + {"SYS_KTIMER_CREATE", Const, 0}, + {"SYS_KTIMER_DELETE", Const, 0}, + {"SYS_KTIMER_GETOVERRUN", Const, 0}, + {"SYS_KTIMER_GETTIME", Const, 0}, + {"SYS_KTIMER_SETTIME", Const, 0}, + {"SYS_KTRACE", Const, 0}, + {"SYS_LCHFLAGS", Const, 0}, + {"SYS_LCHMOD", Const, 0}, + {"SYS_LCHOWN", Const, 0}, + {"SYS_LCHOWN32", Const, 0}, + {"SYS_LEDGER", Const, 16}, + {"SYS_LGETFH", Const, 0}, + {"SYS_LGETXATTR", Const, 0}, + {"SYS_LINK", Const, 0}, + {"SYS_LINKAT", Const, 0}, + {"SYS_LIO_LISTIO", Const, 0}, + {"SYS_LISTEN", Const, 0}, + {"SYS_LISTXATTR", Const, 0}, + {"SYS_LLISTXATTR", Const, 0}, + {"SYS_LOCK", Const, 0}, + {"SYS_LOOKUP_DCOOKIE", Const, 0}, + {"SYS_LPATHCONF", Const, 0}, + {"SYS_LREMOVEXATTR", Const, 0}, + {"SYS_LSEEK", Const, 0}, + {"SYS_LSETXATTR", Const, 0}, + {"SYS_LSTAT", Const, 0}, + {"SYS_LSTAT64", Const, 0}, + {"SYS_LSTAT64_EXTENDED", Const, 0}, + {"SYS_LSTATV", Const, 0}, + {"SYS_LSTAT_EXTENDED", Const, 0}, + {"SYS_LUTIMES", Const, 0}, + {"SYS_MAC_SYSCALL", Const, 0}, + {"SYS_MADVISE", Const, 0}, + {"SYS_MADVISE1", Const, 0}, + {"SYS_MAXSYSCALL", Const, 0}, + {"SYS_MBIND", Const, 0}, + {"SYS_MIGRATE_PAGES", Const, 0}, + {"SYS_MINCORE", Const, 0}, + {"SYS_MINHERIT", Const, 0}, + {"SYS_MKCOMPLEX", Const, 0}, + {"SYS_MKDIR", Const, 0}, + {"SYS_MKDIRAT", Const, 0}, + {"SYS_MKDIR_EXTENDED", Const, 0}, + {"SYS_MKFIFO", Const, 0}, + {"SYS_MKFIFOAT", Const, 0}, + {"SYS_MKFIFO_EXTENDED", Const, 0}, + {"SYS_MKNOD", Const, 0}, + {"SYS_MKNODAT", Const, 0}, + {"SYS_MLOCK", Const, 0}, + {"SYS_MLOCKALL", Const, 0}, + {"SYS_MMAP", Const, 0}, + {"SYS_MMAP2", Const, 0}, + {"SYS_MODCTL", Const, 1}, + {"SYS_MODFIND", Const, 0}, + {"SYS_MODFNEXT", Const, 0}, + {"SYS_MODIFY_LDT", Const, 0}, + {"SYS_MODNEXT", Const, 0}, + {"SYS_MODSTAT", Const, 0}, + {"SYS_MODWATCH", Const, 0}, + {"SYS_MOUNT", Const, 0}, + {"SYS_MOVE_PAGES", Const, 0}, + {"SYS_MPROTECT", Const, 0}, + {"SYS_MPX", Const, 0}, + {"SYS_MQUERY", Const, 1}, + {"SYS_MQ_GETSETATTR", Const, 0}, + {"SYS_MQ_NOTIFY", Const, 0}, + {"SYS_MQ_OPEN", Const, 0}, + {"SYS_MQ_TIMEDRECEIVE", Const, 0}, + {"SYS_MQ_TIMEDSEND", Const, 0}, + {"SYS_MQ_UNLINK", Const, 0}, + {"SYS_MREMAP", Const, 0}, + {"SYS_MSGCTL", Const, 0}, + {"SYS_MSGGET", Const, 0}, + {"SYS_MSGRCV", Const, 0}, + {"SYS_MSGRCV_NOCANCEL", Const, 0}, + {"SYS_MSGSND", Const, 0}, + {"SYS_MSGSND_NOCANCEL", Const, 0}, + {"SYS_MSGSYS", Const, 0}, + 
{"SYS_MSYNC", Const, 0}, + {"SYS_MSYNC_NOCANCEL", Const, 0}, + {"SYS_MUNLOCK", Const, 0}, + {"SYS_MUNLOCKALL", Const, 0}, + {"SYS_MUNMAP", Const, 0}, + {"SYS_NAME_TO_HANDLE_AT", Const, 0}, + {"SYS_NANOSLEEP", Const, 0}, + {"SYS_NEWFSTATAT", Const, 0}, + {"SYS_NFSCLNT", Const, 0}, + {"SYS_NFSSERVCTL", Const, 0}, + {"SYS_NFSSVC", Const, 0}, + {"SYS_NFSTAT", Const, 0}, + {"SYS_NICE", Const, 0}, + {"SYS_NLM_SYSCALL", Const, 14}, + {"SYS_NLSTAT", Const, 0}, + {"SYS_NMOUNT", Const, 0}, + {"SYS_NSTAT", Const, 0}, + {"SYS_NTP_ADJTIME", Const, 0}, + {"SYS_NTP_GETTIME", Const, 0}, + {"SYS_NUMA_GETAFFINITY", Const, 14}, + {"SYS_NUMA_SETAFFINITY", Const, 14}, + {"SYS_OABI_SYSCALL_BASE", Const, 0}, + {"SYS_OBREAK", Const, 0}, + {"SYS_OLDFSTAT", Const, 0}, + {"SYS_OLDLSTAT", Const, 0}, + {"SYS_OLDOLDUNAME", Const, 0}, + {"SYS_OLDSTAT", Const, 0}, + {"SYS_OLDUNAME", Const, 0}, + {"SYS_OPEN", Const, 0}, + {"SYS_OPENAT", Const, 0}, + {"SYS_OPENBSD_POLL", Const, 0}, + {"SYS_OPEN_BY_HANDLE_AT", Const, 0}, + {"SYS_OPEN_DPROTECTED_NP", Const, 16}, + {"SYS_OPEN_EXTENDED", Const, 0}, + {"SYS_OPEN_NOCANCEL", Const, 0}, + {"SYS_OVADVISE", Const, 0}, + {"SYS_PACCEPT", Const, 1}, + {"SYS_PATHCONF", Const, 0}, + {"SYS_PAUSE", Const, 0}, + {"SYS_PCICONFIG_IOBASE", Const, 0}, + {"SYS_PCICONFIG_READ", Const, 0}, + {"SYS_PCICONFIG_WRITE", Const, 0}, + {"SYS_PDFORK", Const, 0}, + {"SYS_PDGETPID", Const, 0}, + {"SYS_PDKILL", Const, 0}, + {"SYS_PERF_EVENT_OPEN", Const, 0}, + {"SYS_PERSONALITY", Const, 0}, + {"SYS_PID_HIBERNATE", Const, 0}, + {"SYS_PID_RESUME", Const, 0}, + {"SYS_PID_SHUTDOWN_SOCKETS", Const, 0}, + {"SYS_PID_SUSPEND", Const, 0}, + {"SYS_PIPE", Const, 0}, + {"SYS_PIPE2", Const, 0}, + {"SYS_PIVOT_ROOT", Const, 0}, + {"SYS_PMC_CONTROL", Const, 1}, + {"SYS_PMC_GET_INFO", Const, 1}, + {"SYS_POLL", Const, 0}, + {"SYS_POLLTS", Const, 1}, + {"SYS_POLL_NOCANCEL", Const, 0}, + {"SYS_POSIX_FADVISE", Const, 0}, + {"SYS_POSIX_FALLOCATE", Const, 0}, + {"SYS_POSIX_OPENPT", Const, 0}, + {"SYS_POSIX_SPAWN", Const, 0}, + {"SYS_PPOLL", Const, 0}, + {"SYS_PRCTL", Const, 0}, + {"SYS_PREAD", Const, 0}, + {"SYS_PREAD64", Const, 0}, + {"SYS_PREADV", Const, 0}, + {"SYS_PREAD_NOCANCEL", Const, 0}, + {"SYS_PRLIMIT64", Const, 0}, + {"SYS_PROCCTL", Const, 3}, + {"SYS_PROCESS_POLICY", Const, 0}, + {"SYS_PROCESS_VM_READV", Const, 0}, + {"SYS_PROCESS_VM_WRITEV", Const, 0}, + {"SYS_PROC_INFO", Const, 0}, + {"SYS_PROF", Const, 0}, + {"SYS_PROFIL", Const, 0}, + {"SYS_PSELECT", Const, 0}, + {"SYS_PSELECT6", Const, 0}, + {"SYS_PSET_ASSIGN", Const, 1}, + {"SYS_PSET_CREATE", Const, 1}, + {"SYS_PSET_DESTROY", Const, 1}, + {"SYS_PSYNCH_CVBROAD", Const, 0}, + {"SYS_PSYNCH_CVCLRPREPOST", Const, 0}, + {"SYS_PSYNCH_CVSIGNAL", Const, 0}, + {"SYS_PSYNCH_CVWAIT", Const, 0}, + {"SYS_PSYNCH_MUTEXDROP", Const, 0}, + {"SYS_PSYNCH_MUTEXWAIT", Const, 0}, + {"SYS_PSYNCH_RW_DOWNGRADE", Const, 0}, + {"SYS_PSYNCH_RW_LONGRDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_RDLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK", Const, 0}, + {"SYS_PSYNCH_RW_UNLOCK2", Const, 0}, + {"SYS_PSYNCH_RW_UPGRADE", Const, 0}, + {"SYS_PSYNCH_RW_WRLOCK", Const, 0}, + {"SYS_PSYNCH_RW_YIELDWRLOCK", Const, 0}, + {"SYS_PTRACE", Const, 0}, + {"SYS_PUTPMSG", Const, 0}, + {"SYS_PWRITE", Const, 0}, + {"SYS_PWRITE64", Const, 0}, + {"SYS_PWRITEV", Const, 0}, + {"SYS_PWRITE_NOCANCEL", Const, 0}, + {"SYS_QUERY_MODULE", Const, 0}, + {"SYS_QUOTACTL", Const, 0}, + {"SYS_RASCTL", Const, 1}, + {"SYS_RCTL_ADD_RULE", Const, 0}, + {"SYS_RCTL_GET_LIMITS", Const, 0}, + {"SYS_RCTL_GET_RACCT", Const, 0}, + 
{"SYS_RCTL_GET_RULES", Const, 0}, + {"SYS_RCTL_REMOVE_RULE", Const, 0}, + {"SYS_READ", Const, 0}, + {"SYS_READAHEAD", Const, 0}, + {"SYS_READDIR", Const, 0}, + {"SYS_READLINK", Const, 0}, + {"SYS_READLINKAT", Const, 0}, + {"SYS_READV", Const, 0}, + {"SYS_READV_NOCANCEL", Const, 0}, + {"SYS_READ_NOCANCEL", Const, 0}, + {"SYS_REBOOT", Const, 0}, + {"SYS_RECV", Const, 0}, + {"SYS_RECVFROM", Const, 0}, + {"SYS_RECVFROM_NOCANCEL", Const, 0}, + {"SYS_RECVMMSG", Const, 0}, + {"SYS_RECVMSG", Const, 0}, + {"SYS_RECVMSG_NOCANCEL", Const, 0}, + {"SYS_REMAP_FILE_PAGES", Const, 0}, + {"SYS_REMOVEXATTR", Const, 0}, + {"SYS_RENAME", Const, 0}, + {"SYS_RENAMEAT", Const, 0}, + {"SYS_REQUEST_KEY", Const, 0}, + {"SYS_RESTART_SYSCALL", Const, 0}, + {"SYS_REVOKE", Const, 0}, + {"SYS_RFORK", Const, 0}, + {"SYS_RMDIR", Const, 0}, + {"SYS_RTPRIO", Const, 0}, + {"SYS_RTPRIO_THREAD", Const, 0}, + {"SYS_RT_SIGACTION", Const, 0}, + {"SYS_RT_SIGPENDING", Const, 0}, + {"SYS_RT_SIGPROCMASK", Const, 0}, + {"SYS_RT_SIGQUEUEINFO", Const, 0}, + {"SYS_RT_SIGRETURN", Const, 0}, + {"SYS_RT_SIGSUSPEND", Const, 0}, + {"SYS_RT_SIGTIMEDWAIT", Const, 0}, + {"SYS_RT_TGSIGQUEUEINFO", Const, 0}, + {"SYS_SBRK", Const, 0}, + {"SYS_SCHED_GETAFFINITY", Const, 0}, + {"SYS_SCHED_GETPARAM", Const, 0}, + {"SYS_SCHED_GETSCHEDULER", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MAX", Const, 0}, + {"SYS_SCHED_GET_PRIORITY_MIN", Const, 0}, + {"SYS_SCHED_RR_GET_INTERVAL", Const, 0}, + {"SYS_SCHED_SETAFFINITY", Const, 0}, + {"SYS_SCHED_SETPARAM", Const, 0}, + {"SYS_SCHED_SETSCHEDULER", Const, 0}, + {"SYS_SCHED_YIELD", Const, 0}, + {"SYS_SCTP_GENERIC_RECVMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG", Const, 0}, + {"SYS_SCTP_GENERIC_SENDMSG_IOV", Const, 0}, + {"SYS_SCTP_PEELOFF", Const, 0}, + {"SYS_SEARCHFS", Const, 0}, + {"SYS_SECURITY", Const, 0}, + {"SYS_SELECT", Const, 0}, + {"SYS_SELECT_NOCANCEL", Const, 0}, + {"SYS_SEMCONFIG", Const, 1}, + {"SYS_SEMCTL", Const, 0}, + {"SYS_SEMGET", Const, 0}, + {"SYS_SEMOP", Const, 0}, + {"SYS_SEMSYS", Const, 0}, + {"SYS_SEMTIMEDOP", Const, 0}, + {"SYS_SEM_CLOSE", Const, 0}, + {"SYS_SEM_DESTROY", Const, 0}, + {"SYS_SEM_GETVALUE", Const, 0}, + {"SYS_SEM_INIT", Const, 0}, + {"SYS_SEM_OPEN", Const, 0}, + {"SYS_SEM_POST", Const, 0}, + {"SYS_SEM_TRYWAIT", Const, 0}, + {"SYS_SEM_UNLINK", Const, 0}, + {"SYS_SEM_WAIT", Const, 0}, + {"SYS_SEM_WAIT_NOCANCEL", Const, 0}, + {"SYS_SEND", Const, 0}, + {"SYS_SENDFILE", Const, 0}, + {"SYS_SENDFILE64", Const, 0}, + {"SYS_SENDMMSG", Const, 0}, + {"SYS_SENDMSG", Const, 0}, + {"SYS_SENDMSG_NOCANCEL", Const, 0}, + {"SYS_SENDTO", Const, 0}, + {"SYS_SENDTO_NOCANCEL", Const, 0}, + {"SYS_SETATTRLIST", Const, 0}, + {"SYS_SETAUDIT", Const, 0}, + {"SYS_SETAUDIT_ADDR", Const, 0}, + {"SYS_SETAUID", Const, 0}, + {"SYS_SETCONTEXT", Const, 0}, + {"SYS_SETDOMAINNAME", Const, 0}, + {"SYS_SETEGID", Const, 0}, + {"SYS_SETEUID", Const, 0}, + {"SYS_SETFIB", Const, 0}, + {"SYS_SETFSGID", Const, 0}, + {"SYS_SETFSGID32", Const, 0}, + {"SYS_SETFSUID", Const, 0}, + {"SYS_SETFSUID32", Const, 0}, + {"SYS_SETGID", Const, 0}, + {"SYS_SETGID32", Const, 0}, + {"SYS_SETGROUPS", Const, 0}, + {"SYS_SETGROUPS32", Const, 0}, + {"SYS_SETHOSTNAME", Const, 0}, + {"SYS_SETITIMER", Const, 0}, + {"SYS_SETLCID", Const, 0}, + {"SYS_SETLOGIN", Const, 0}, + {"SYS_SETLOGINCLASS", Const, 0}, + {"SYS_SETNS", Const, 0}, + {"SYS_SETPGID", Const, 0}, + {"SYS_SETPRIORITY", Const, 0}, + {"SYS_SETPRIVEXEC", Const, 0}, + {"SYS_SETREGID", Const, 0}, + {"SYS_SETREGID32", Const, 0}, + {"SYS_SETRESGID", Const, 0}, + {"SYS_SETRESGID32", Const, 
0}, + {"SYS_SETRESUID", Const, 0}, + {"SYS_SETRESUID32", Const, 0}, + {"SYS_SETREUID", Const, 0}, + {"SYS_SETREUID32", Const, 0}, + {"SYS_SETRLIMIT", Const, 0}, + {"SYS_SETRTABLE", Const, 1}, + {"SYS_SETSGROUPS", Const, 0}, + {"SYS_SETSID", Const, 0}, + {"SYS_SETSOCKOPT", Const, 0}, + {"SYS_SETTID", Const, 0}, + {"SYS_SETTID_WITH_PID", Const, 0}, + {"SYS_SETTIMEOFDAY", Const, 0}, + {"SYS_SETUID", Const, 0}, + {"SYS_SETUID32", Const, 0}, + {"SYS_SETWGROUPS", Const, 0}, + {"SYS_SETXATTR", Const, 0}, + {"SYS_SET_MEMPOLICY", Const, 0}, + {"SYS_SET_ROBUST_LIST", Const, 0}, + {"SYS_SET_THREAD_AREA", Const, 0}, + {"SYS_SET_TID_ADDRESS", Const, 0}, + {"SYS_SGETMASK", Const, 0}, + {"SYS_SHARED_REGION_CHECK_NP", Const, 0}, + {"SYS_SHARED_REGION_MAP_AND_SLIDE_NP", Const, 0}, + {"SYS_SHMAT", Const, 0}, + {"SYS_SHMCTL", Const, 0}, + {"SYS_SHMDT", Const, 0}, + {"SYS_SHMGET", Const, 0}, + {"SYS_SHMSYS", Const, 0}, + {"SYS_SHM_OPEN", Const, 0}, + {"SYS_SHM_UNLINK", Const, 0}, + {"SYS_SHUTDOWN", Const, 0}, + {"SYS_SIGACTION", Const, 0}, + {"SYS_SIGALTSTACK", Const, 0}, + {"SYS_SIGNAL", Const, 0}, + {"SYS_SIGNALFD", Const, 0}, + {"SYS_SIGNALFD4", Const, 0}, + {"SYS_SIGPENDING", Const, 0}, + {"SYS_SIGPROCMASK", Const, 0}, + {"SYS_SIGQUEUE", Const, 0}, + {"SYS_SIGQUEUEINFO", Const, 1}, + {"SYS_SIGRETURN", Const, 0}, + {"SYS_SIGSUSPEND", Const, 0}, + {"SYS_SIGSUSPEND_NOCANCEL", Const, 0}, + {"SYS_SIGTIMEDWAIT", Const, 0}, + {"SYS_SIGWAIT", Const, 0}, + {"SYS_SIGWAITINFO", Const, 0}, + {"SYS_SOCKET", Const, 0}, + {"SYS_SOCKETCALL", Const, 0}, + {"SYS_SOCKETPAIR", Const, 0}, + {"SYS_SPLICE", Const, 0}, + {"SYS_SSETMASK", Const, 0}, + {"SYS_SSTK", Const, 0}, + {"SYS_STACK_SNAPSHOT", Const, 0}, + {"SYS_STAT", Const, 0}, + {"SYS_STAT64", Const, 0}, + {"SYS_STAT64_EXTENDED", Const, 0}, + {"SYS_STATFS", Const, 0}, + {"SYS_STATFS64", Const, 0}, + {"SYS_STATV", Const, 0}, + {"SYS_STATVFS1", Const, 1}, + {"SYS_STAT_EXTENDED", Const, 0}, + {"SYS_STIME", Const, 0}, + {"SYS_STTY", Const, 0}, + {"SYS_SWAPCONTEXT", Const, 0}, + {"SYS_SWAPCTL", Const, 1}, + {"SYS_SWAPOFF", Const, 0}, + {"SYS_SWAPON", Const, 0}, + {"SYS_SYMLINK", Const, 0}, + {"SYS_SYMLINKAT", Const, 0}, + {"SYS_SYNC", Const, 0}, + {"SYS_SYNCFS", Const, 0}, + {"SYS_SYNC_FILE_RANGE", Const, 0}, + {"SYS_SYSARCH", Const, 0}, + {"SYS_SYSCALL", Const, 0}, + {"SYS_SYSCALL_BASE", Const, 0}, + {"SYS_SYSFS", Const, 0}, + {"SYS_SYSINFO", Const, 0}, + {"SYS_SYSLOG", Const, 0}, + {"SYS_TEE", Const, 0}, + {"SYS_TGKILL", Const, 0}, + {"SYS_THREAD_SELFID", Const, 0}, + {"SYS_THR_CREATE", Const, 0}, + {"SYS_THR_EXIT", Const, 0}, + {"SYS_THR_KILL", Const, 0}, + {"SYS_THR_KILL2", Const, 0}, + {"SYS_THR_NEW", Const, 0}, + {"SYS_THR_SELF", Const, 0}, + {"SYS_THR_SET_NAME", Const, 0}, + {"SYS_THR_SUSPEND", Const, 0}, + {"SYS_THR_WAKE", Const, 0}, + {"SYS_TIME", Const, 0}, + {"SYS_TIMERFD_CREATE", Const, 0}, + {"SYS_TIMERFD_GETTIME", Const, 0}, + {"SYS_TIMERFD_SETTIME", Const, 0}, + {"SYS_TIMER_CREATE", Const, 0}, + {"SYS_TIMER_DELETE", Const, 0}, + {"SYS_TIMER_GETOVERRUN", Const, 0}, + {"SYS_TIMER_GETTIME", Const, 0}, + {"SYS_TIMER_SETTIME", Const, 0}, + {"SYS_TIMES", Const, 0}, + {"SYS_TKILL", Const, 0}, + {"SYS_TRUNCATE", Const, 0}, + {"SYS_TRUNCATE64", Const, 0}, + {"SYS_TUXCALL", Const, 0}, + {"SYS_UGETRLIMIT", Const, 0}, + {"SYS_ULIMIT", Const, 0}, + {"SYS_UMASK", Const, 0}, + {"SYS_UMASK_EXTENDED", Const, 0}, + {"SYS_UMOUNT", Const, 0}, + {"SYS_UMOUNT2", Const, 0}, + {"SYS_UNAME", Const, 0}, + {"SYS_UNDELETE", Const, 0}, + {"SYS_UNLINK", Const, 0}, + {"SYS_UNLINKAT", Const, 
0}, + {"SYS_UNMOUNT", Const, 0}, + {"SYS_UNSHARE", Const, 0}, + {"SYS_USELIB", Const, 0}, + {"SYS_USTAT", Const, 0}, + {"SYS_UTIME", Const, 0}, + {"SYS_UTIMENSAT", Const, 0}, + {"SYS_UTIMES", Const, 0}, + {"SYS_UTRACE", Const, 0}, + {"SYS_UUIDGEN", Const, 0}, + {"SYS_VADVISE", Const, 1}, + {"SYS_VFORK", Const, 0}, + {"SYS_VHANGUP", Const, 0}, + {"SYS_VM86", Const, 0}, + {"SYS_VM86OLD", Const, 0}, + {"SYS_VMSPLICE", Const, 0}, + {"SYS_VM_PRESSURE_MONITOR", Const, 0}, + {"SYS_VSERVER", Const, 0}, + {"SYS_WAIT4", Const, 0}, + {"SYS_WAIT4_NOCANCEL", Const, 0}, + {"SYS_WAIT6", Const, 1}, + {"SYS_WAITEVENT", Const, 0}, + {"SYS_WAITID", Const, 0}, + {"SYS_WAITID_NOCANCEL", Const, 0}, + {"SYS_WAITPID", Const, 0}, + {"SYS_WATCHEVENT", Const, 0}, + {"SYS_WORKQ_KERNRETURN", Const, 0}, + {"SYS_WORKQ_OPEN", Const, 0}, + {"SYS_WRITE", Const, 0}, + {"SYS_WRITEV", Const, 0}, + {"SYS_WRITEV_NOCANCEL", Const, 0}, + {"SYS_WRITE_NOCANCEL", Const, 0}, + {"SYS_YIELD", Const, 0}, + {"SYS__LLSEEK", Const, 0}, + {"SYS__LWP_CONTINUE", Const, 1}, + {"SYS__LWP_CREATE", Const, 1}, + {"SYS__LWP_CTL", Const, 1}, + {"SYS__LWP_DETACH", Const, 1}, + {"SYS__LWP_EXIT", Const, 1}, + {"SYS__LWP_GETNAME", Const, 1}, + {"SYS__LWP_GETPRIVATE", Const, 1}, + {"SYS__LWP_KILL", Const, 1}, + {"SYS__LWP_PARK", Const, 1}, + {"SYS__LWP_SELF", Const, 1}, + {"SYS__LWP_SETNAME", Const, 1}, + {"SYS__LWP_SETPRIVATE", Const, 1}, + {"SYS__LWP_SUSPEND", Const, 1}, + {"SYS__LWP_UNPARK", Const, 1}, + {"SYS__LWP_UNPARK_ALL", Const, 1}, + {"SYS__LWP_WAIT", Const, 1}, + {"SYS__LWP_WAKEUP", Const, 1}, + {"SYS__NEWSELECT", Const, 0}, + {"SYS__PSET_BIND", Const, 1}, + {"SYS__SCHED_GETAFFINITY", Const, 1}, + {"SYS__SCHED_GETPARAM", Const, 1}, + {"SYS__SCHED_SETAFFINITY", Const, 1}, + {"SYS__SCHED_SETPARAM", Const, 1}, + {"SYS__SYSCTL", Const, 0}, + {"SYS__UMTX_LOCK", Const, 0}, + {"SYS__UMTX_OP", Const, 0}, + {"SYS__UMTX_UNLOCK", Const, 0}, + {"SYS___ACL_ACLCHECK_FD", Const, 0}, + {"SYS___ACL_ACLCHECK_FILE", Const, 0}, + {"SYS___ACL_ACLCHECK_LINK", Const, 0}, + {"SYS___ACL_DELETE_FD", Const, 0}, + {"SYS___ACL_DELETE_FILE", Const, 0}, + {"SYS___ACL_DELETE_LINK", Const, 0}, + {"SYS___ACL_GET_FD", Const, 0}, + {"SYS___ACL_GET_FILE", Const, 0}, + {"SYS___ACL_GET_LINK", Const, 0}, + {"SYS___ACL_SET_FD", Const, 0}, + {"SYS___ACL_SET_FILE", Const, 0}, + {"SYS___ACL_SET_LINK", Const, 0}, + {"SYS___CAP_RIGHTS_GET", Const, 14}, + {"SYS___CLONE", Const, 1}, + {"SYS___DISABLE_THREADSIGNAL", Const, 0}, + {"SYS___GETCWD", Const, 0}, + {"SYS___GETLOGIN", Const, 1}, + {"SYS___GET_TCB", Const, 1}, + {"SYS___MAC_EXECVE", Const, 0}, + {"SYS___MAC_GETFSSTAT", Const, 0}, + {"SYS___MAC_GET_FD", Const, 0}, + {"SYS___MAC_GET_FILE", Const, 0}, + {"SYS___MAC_GET_LCID", Const, 0}, + {"SYS___MAC_GET_LCTX", Const, 0}, + {"SYS___MAC_GET_LINK", Const, 0}, + {"SYS___MAC_GET_MOUNT", Const, 0}, + {"SYS___MAC_GET_PID", Const, 0}, + {"SYS___MAC_GET_PROC", Const, 0}, + {"SYS___MAC_MOUNT", Const, 0}, + {"SYS___MAC_SET_FD", Const, 0}, + {"SYS___MAC_SET_FILE", Const, 0}, + {"SYS___MAC_SET_LCTX", Const, 0}, + {"SYS___MAC_SET_LINK", Const, 0}, + {"SYS___MAC_SET_PROC", Const, 0}, + {"SYS___MAC_SYSCALL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL", Const, 0}, + {"SYS___OLD_SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___POSIX_CHOWN", Const, 1}, + {"SYS___POSIX_FCHOWN", Const, 1}, + {"SYS___POSIX_LCHOWN", Const, 1}, + {"SYS___POSIX_RENAME", Const, 1}, + {"SYS___PTHREAD_CANCELED", Const, 0}, + {"SYS___PTHREAD_CHDIR", Const, 0}, + {"SYS___PTHREAD_FCHDIR", Const, 0}, + {"SYS___PTHREAD_KILL", Const, 0}, + 
{"SYS___PTHREAD_MARKCANCEL", Const, 0}, + {"SYS___PTHREAD_SIGMASK", Const, 0}, + {"SYS___QUOTACTL", Const, 1}, + {"SYS___SEMCTL", Const, 1}, + {"SYS___SEMWAIT_SIGNAL", Const, 0}, + {"SYS___SEMWAIT_SIGNAL_NOCANCEL", Const, 0}, + {"SYS___SETLOGIN", Const, 1}, + {"SYS___SETUGID", Const, 0}, + {"SYS___SET_TCB", Const, 1}, + {"SYS___SIGACTION_SIGTRAMP", Const, 1}, + {"SYS___SIGTIMEDWAIT", Const, 1}, + {"SYS___SIGWAIT", Const, 0}, + {"SYS___SIGWAIT_NOCANCEL", Const, 0}, + {"SYS___SYSCTL", Const, 0}, + {"SYS___TFORK", Const, 1}, + {"SYS___THREXIT", Const, 1}, + {"SYS___THRSIGDIVERT", Const, 1}, + {"SYS___THRSLEEP", Const, 1}, + {"SYS___THRWAKEUP", Const, 1}, + {"S_ARCH1", Const, 1}, + {"S_ARCH2", Const, 1}, + {"S_BLKSIZE", Const, 0}, + {"S_IEXEC", Const, 0}, + {"S_IFBLK", Const, 0}, + {"S_IFCHR", Const, 0}, + {"S_IFDIR", Const, 0}, + {"S_IFIFO", Const, 0}, + {"S_IFLNK", Const, 0}, + {"S_IFMT", Const, 0}, + {"S_IFREG", Const, 0}, + {"S_IFSOCK", Const, 0}, + {"S_IFWHT", Const, 0}, + {"S_IREAD", Const, 0}, + {"S_IRGRP", Const, 0}, + {"S_IROTH", Const, 0}, + {"S_IRUSR", Const, 0}, + {"S_IRWXG", Const, 0}, + {"S_IRWXO", Const, 0}, + {"S_IRWXU", Const, 0}, + {"S_ISGID", Const, 0}, + {"S_ISTXT", Const, 0}, + {"S_ISUID", Const, 0}, + {"S_ISVTX", Const, 0}, + {"S_IWGRP", Const, 0}, + {"S_IWOTH", Const, 0}, + {"S_IWRITE", Const, 0}, + {"S_IWUSR", Const, 0}, + {"S_IXGRP", Const, 0}, + {"S_IXOTH", Const, 0}, + {"S_IXUSR", Const, 0}, + {"S_LOGIN_SET", Const, 1}, + {"SecurityAttributes", Type, 0}, + {"SecurityAttributes.InheritHandle", Field, 0}, + {"SecurityAttributes.Length", Field, 0}, + {"SecurityAttributes.SecurityDescriptor", Field, 0}, + {"Seek", Func, 0}, + {"Select", Func, 0}, + {"Sendfile", Func, 0}, + {"Sendmsg", Func, 0}, + {"SendmsgN", Func, 3}, + {"Sendto", Func, 0}, + {"Servent", Type, 0}, + {"Servent.Aliases", Field, 0}, + {"Servent.Name", Field, 0}, + {"Servent.Port", Field, 0}, + {"Servent.Proto", Field, 0}, + {"SetBpf", Func, 0}, + {"SetBpfBuflen", Func, 0}, + {"SetBpfDatalink", Func, 0}, + {"SetBpfHeadercmpl", Func, 0}, + {"SetBpfImmediate", Func, 0}, + {"SetBpfInterface", Func, 0}, + {"SetBpfPromisc", Func, 0}, + {"SetBpfTimeout", Func, 0}, + {"SetCurrentDirectory", Func, 0}, + {"SetEndOfFile", Func, 0}, + {"SetEnvironmentVariable", Func, 0}, + {"SetFileAttributes", Func, 0}, + {"SetFileCompletionNotificationModes", Func, 2}, + {"SetFilePointer", Func, 0}, + {"SetFileTime", Func, 0}, + {"SetHandleInformation", Func, 0}, + {"SetKevent", Func, 0}, + {"SetLsfPromisc", Func, 0}, + {"SetNonblock", Func, 0}, + {"Setdomainname", Func, 0}, + {"Setegid", Func, 0}, + {"Setenv", Func, 0}, + {"Seteuid", Func, 0}, + {"Setfsgid", Func, 0}, + {"Setfsuid", Func, 0}, + {"Setgid", Func, 0}, + {"Setgroups", Func, 0}, + {"Sethostname", Func, 0}, + {"Setlogin", Func, 0}, + {"Setpgid", Func, 0}, + {"Setpriority", Func, 0}, + {"Setprivexec", Func, 0}, + {"Setregid", Func, 0}, + {"Setresgid", Func, 0}, + {"Setresuid", Func, 0}, + {"Setreuid", Func, 0}, + {"Setrlimit", Func, 0}, + {"Setsid", Func, 0}, + {"Setsockopt", Func, 0}, + {"SetsockoptByte", Func, 0}, + {"SetsockoptICMPv6Filter", Func, 2}, + {"SetsockoptIPMreq", Func, 0}, + {"SetsockoptIPMreqn", Func, 0}, + {"SetsockoptIPv6Mreq", Func, 0}, + {"SetsockoptInet4Addr", Func, 0}, + {"SetsockoptInt", Func, 0}, + {"SetsockoptLinger", Func, 0}, + {"SetsockoptString", Func, 0}, + {"SetsockoptTimeval", Func, 0}, + {"Settimeofday", Func, 0}, + {"Setuid", Func, 0}, + {"Setxattr", Func, 1}, + {"Shutdown", Func, 0}, + {"SidTypeAlias", Const, 0}, + {"SidTypeComputer", 
Const, 0}, + {"SidTypeDeletedAccount", Const, 0}, + {"SidTypeDomain", Const, 0}, + {"SidTypeGroup", Const, 0}, + {"SidTypeInvalid", Const, 0}, + {"SidTypeLabel", Const, 0}, + {"SidTypeUnknown", Const, 0}, + {"SidTypeUser", Const, 0}, + {"SidTypeWellKnownGroup", Const, 0}, + {"Signal", Type, 0}, + {"SizeofBpfHdr", Const, 0}, + {"SizeofBpfInsn", Const, 0}, + {"SizeofBpfProgram", Const, 0}, + {"SizeofBpfStat", Const, 0}, + {"SizeofBpfVersion", Const, 0}, + {"SizeofBpfZbuf", Const, 0}, + {"SizeofBpfZbufHeader", Const, 0}, + {"SizeofCmsghdr", Const, 0}, + {"SizeofICMPv6Filter", Const, 2}, + {"SizeofIPMreq", Const, 0}, + {"SizeofIPMreqn", Const, 0}, + {"SizeofIPv6MTUInfo", Const, 2}, + {"SizeofIPv6Mreq", Const, 0}, + {"SizeofIfAddrmsg", Const, 0}, + {"SizeofIfAnnounceMsghdr", Const, 1}, + {"SizeofIfData", Const, 0}, + {"SizeofIfInfomsg", Const, 0}, + {"SizeofIfMsghdr", Const, 0}, + {"SizeofIfaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr", Const, 0}, + {"SizeofIfmaMsghdr2", Const, 0}, + {"SizeofInet4Pktinfo", Const, 0}, + {"SizeofInet6Pktinfo", Const, 0}, + {"SizeofInotifyEvent", Const, 0}, + {"SizeofLinger", Const, 0}, + {"SizeofMsghdr", Const, 0}, + {"SizeofNlAttr", Const, 0}, + {"SizeofNlMsgerr", Const, 0}, + {"SizeofNlMsghdr", Const, 0}, + {"SizeofRtAttr", Const, 0}, + {"SizeofRtGenmsg", Const, 0}, + {"SizeofRtMetrics", Const, 0}, + {"SizeofRtMsg", Const, 0}, + {"SizeofRtMsghdr", Const, 0}, + {"SizeofRtNexthop", Const, 0}, + {"SizeofSockFilter", Const, 0}, + {"SizeofSockFprog", Const, 0}, + {"SizeofSockaddrAny", Const, 0}, + {"SizeofSockaddrDatalink", Const, 0}, + {"SizeofSockaddrInet4", Const, 0}, + {"SizeofSockaddrInet6", Const, 0}, + {"SizeofSockaddrLinklayer", Const, 0}, + {"SizeofSockaddrNetlink", Const, 0}, + {"SizeofSockaddrUnix", Const, 0}, + {"SizeofTCPInfo", Const, 1}, + {"SizeofUcred", Const, 0}, + {"SlicePtrFromStrings", Func, 1}, + {"SockFilter", Type, 0}, + {"SockFilter.Code", Field, 0}, + {"SockFilter.Jf", Field, 0}, + {"SockFilter.Jt", Field, 0}, + {"SockFilter.K", Field, 0}, + {"SockFprog", Type, 0}, + {"SockFprog.Filter", Field, 0}, + {"SockFprog.Len", Field, 0}, + {"SockFprog.Pad_cgo_0", Field, 0}, + {"Sockaddr", Type, 0}, + {"SockaddrDatalink", Type, 0}, + {"SockaddrDatalink.Alen", Field, 0}, + {"SockaddrDatalink.Data", Field, 0}, + {"SockaddrDatalink.Family", Field, 0}, + {"SockaddrDatalink.Index", Field, 0}, + {"SockaddrDatalink.Len", Field, 0}, + {"SockaddrDatalink.Nlen", Field, 0}, + {"SockaddrDatalink.Slen", Field, 0}, + {"SockaddrDatalink.Type", Field, 0}, + {"SockaddrGen", Type, 0}, + {"SockaddrInet4", Type, 0}, + {"SockaddrInet4.Addr", Field, 0}, + {"SockaddrInet4.Port", Field, 0}, + {"SockaddrInet6", Type, 0}, + {"SockaddrInet6.Addr", Field, 0}, + {"SockaddrInet6.Port", Field, 0}, + {"SockaddrInet6.ZoneId", Field, 0}, + {"SockaddrLinklayer", Type, 0}, + {"SockaddrLinklayer.Addr", Field, 0}, + {"SockaddrLinklayer.Halen", Field, 0}, + {"SockaddrLinklayer.Hatype", Field, 0}, + {"SockaddrLinklayer.Ifindex", Field, 0}, + {"SockaddrLinklayer.Pkttype", Field, 0}, + {"SockaddrLinklayer.Protocol", Field, 0}, + {"SockaddrNetlink", Type, 0}, + {"SockaddrNetlink.Family", Field, 0}, + {"SockaddrNetlink.Groups", Field, 0}, + {"SockaddrNetlink.Pad", Field, 0}, + {"SockaddrNetlink.Pid", Field, 0}, + {"SockaddrUnix", Type, 0}, + {"SockaddrUnix.Name", Field, 0}, + {"Socket", Func, 0}, + {"SocketControlMessage", Type, 0}, + {"SocketControlMessage.Data", Field, 0}, + {"SocketControlMessage.Header", Field, 0}, + {"SocketDisableIPv6", Var, 0}, + {"Socketpair", Func, 0}, + {"Splice", Func, 
0}, + {"StartProcess", Func, 0}, + {"StartupInfo", Type, 0}, + {"StartupInfo.Cb", Field, 0}, + {"StartupInfo.Desktop", Field, 0}, + {"StartupInfo.FillAttribute", Field, 0}, + {"StartupInfo.Flags", Field, 0}, + {"StartupInfo.ShowWindow", Field, 0}, + {"StartupInfo.StdErr", Field, 0}, + {"StartupInfo.StdInput", Field, 0}, + {"StartupInfo.StdOutput", Field, 0}, + {"StartupInfo.Title", Field, 0}, + {"StartupInfo.X", Field, 0}, + {"StartupInfo.XCountChars", Field, 0}, + {"StartupInfo.XSize", Field, 0}, + {"StartupInfo.Y", Field, 0}, + {"StartupInfo.YCountChars", Field, 0}, + {"StartupInfo.YSize", Field, 0}, + {"Stat", Func, 0}, + {"Stat_t", Type, 0}, + {"Stat_t.Atim", Field, 0}, + {"Stat_t.Atim_ext", Field, 12}, + {"Stat_t.Atimespec", Field, 0}, + {"Stat_t.Birthtimespec", Field, 0}, + {"Stat_t.Blksize", Field, 0}, + {"Stat_t.Blocks", Field, 0}, + {"Stat_t.Btim_ext", Field, 12}, + {"Stat_t.Ctim", Field, 0}, + {"Stat_t.Ctim_ext", Field, 12}, + {"Stat_t.Ctimespec", Field, 0}, + {"Stat_t.Dev", Field, 0}, + {"Stat_t.Flags", Field, 0}, + {"Stat_t.Gen", Field, 0}, + {"Stat_t.Gid", Field, 0}, + {"Stat_t.Ino", Field, 0}, + {"Stat_t.Lspare", Field, 0}, + {"Stat_t.Lspare0", Field, 2}, + {"Stat_t.Lspare1", Field, 2}, + {"Stat_t.Mode", Field, 0}, + {"Stat_t.Mtim", Field, 0}, + {"Stat_t.Mtim_ext", Field, 12}, + {"Stat_t.Mtimespec", Field, 0}, + {"Stat_t.Nlink", Field, 0}, + {"Stat_t.Pad_cgo_0", Field, 0}, + {"Stat_t.Pad_cgo_1", Field, 0}, + {"Stat_t.Pad_cgo_2", Field, 0}, + {"Stat_t.Padding0", Field, 12}, + {"Stat_t.Padding1", Field, 12}, + {"Stat_t.Qspare", Field, 0}, + {"Stat_t.Rdev", Field, 0}, + {"Stat_t.Size", Field, 0}, + {"Stat_t.Spare", Field, 2}, + {"Stat_t.Uid", Field, 0}, + {"Stat_t.X__pad0", Field, 0}, + {"Stat_t.X__pad1", Field, 0}, + {"Stat_t.X__pad2", Field, 0}, + {"Stat_t.X__st_birthtim", Field, 2}, + {"Stat_t.X__st_ino", Field, 0}, + {"Stat_t.X__unused", Field, 0}, + {"Statfs", Func, 0}, + {"Statfs_t", Type, 0}, + {"Statfs_t.Asyncreads", Field, 0}, + {"Statfs_t.Asyncwrites", Field, 0}, + {"Statfs_t.Bavail", Field, 0}, + {"Statfs_t.Bfree", Field, 0}, + {"Statfs_t.Blocks", Field, 0}, + {"Statfs_t.Bsize", Field, 0}, + {"Statfs_t.Charspare", Field, 0}, + {"Statfs_t.F_asyncreads", Field, 2}, + {"Statfs_t.F_asyncwrites", Field, 2}, + {"Statfs_t.F_bavail", Field, 2}, + {"Statfs_t.F_bfree", Field, 2}, + {"Statfs_t.F_blocks", Field, 2}, + {"Statfs_t.F_bsize", Field, 2}, + {"Statfs_t.F_ctime", Field, 2}, + {"Statfs_t.F_favail", Field, 2}, + {"Statfs_t.F_ffree", Field, 2}, + {"Statfs_t.F_files", Field, 2}, + {"Statfs_t.F_flags", Field, 2}, + {"Statfs_t.F_fsid", Field, 2}, + {"Statfs_t.F_fstypename", Field, 2}, + {"Statfs_t.F_iosize", Field, 2}, + {"Statfs_t.F_mntfromname", Field, 2}, + {"Statfs_t.F_mntfromspec", Field, 3}, + {"Statfs_t.F_mntonname", Field, 2}, + {"Statfs_t.F_namemax", Field, 2}, + {"Statfs_t.F_owner", Field, 2}, + {"Statfs_t.F_spare", Field, 2}, + {"Statfs_t.F_syncreads", Field, 2}, + {"Statfs_t.F_syncwrites", Field, 2}, + {"Statfs_t.Ffree", Field, 0}, + {"Statfs_t.Files", Field, 0}, + {"Statfs_t.Flags", Field, 0}, + {"Statfs_t.Frsize", Field, 0}, + {"Statfs_t.Fsid", Field, 0}, + {"Statfs_t.Fssubtype", Field, 0}, + {"Statfs_t.Fstypename", Field, 0}, + {"Statfs_t.Iosize", Field, 0}, + {"Statfs_t.Mntfromname", Field, 0}, + {"Statfs_t.Mntonname", Field, 0}, + {"Statfs_t.Mount_info", Field, 2}, + {"Statfs_t.Namelen", Field, 0}, + {"Statfs_t.Namemax", Field, 0}, + {"Statfs_t.Owner", Field, 0}, + {"Statfs_t.Pad_cgo_0", Field, 0}, + {"Statfs_t.Pad_cgo_1", Field, 2}, + {"Statfs_t.Reserved", 
Field, 0}, + {"Statfs_t.Spare", Field, 0}, + {"Statfs_t.Syncreads", Field, 0}, + {"Statfs_t.Syncwrites", Field, 0}, + {"Statfs_t.Type", Field, 0}, + {"Statfs_t.Version", Field, 0}, + {"Stderr", Var, 0}, + {"Stdin", Var, 0}, + {"Stdout", Var, 0}, + {"StringBytePtr", Func, 0}, + {"StringByteSlice", Func, 0}, + {"StringSlicePtr", Func, 0}, + {"StringToSid", Func, 0}, + {"StringToUTF16", Func, 0}, + {"StringToUTF16Ptr", Func, 0}, + {"Symlink", Func, 0}, + {"Sync", Func, 0}, + {"SyncFileRange", Func, 0}, + {"SysProcAttr", Type, 0}, + {"SysProcAttr.AdditionalInheritedHandles", Field, 17}, + {"SysProcAttr.AmbientCaps", Field, 9}, + {"SysProcAttr.CgroupFD", Field, 20}, + {"SysProcAttr.Chroot", Field, 0}, + {"SysProcAttr.Cloneflags", Field, 2}, + {"SysProcAttr.CmdLine", Field, 0}, + {"SysProcAttr.CreationFlags", Field, 1}, + {"SysProcAttr.Credential", Field, 0}, + {"SysProcAttr.Ctty", Field, 1}, + {"SysProcAttr.Foreground", Field, 5}, + {"SysProcAttr.GidMappings", Field, 4}, + {"SysProcAttr.GidMappingsEnableSetgroups", Field, 5}, + {"SysProcAttr.HideWindow", Field, 0}, + {"SysProcAttr.Jail", Field, 21}, + {"SysProcAttr.NoInheritHandles", Field, 16}, + {"SysProcAttr.Noctty", Field, 0}, + {"SysProcAttr.ParentProcess", Field, 17}, + {"SysProcAttr.Pdeathsig", Field, 0}, + {"SysProcAttr.Pgid", Field, 5}, + {"SysProcAttr.PidFD", Field, 22}, + {"SysProcAttr.ProcessAttributes", Field, 13}, + {"SysProcAttr.Ptrace", Field, 0}, + {"SysProcAttr.Setctty", Field, 0}, + {"SysProcAttr.Setpgid", Field, 0}, + {"SysProcAttr.Setsid", Field, 0}, + {"SysProcAttr.ThreadAttributes", Field, 13}, + {"SysProcAttr.Token", Field, 10}, + {"SysProcAttr.UidMappings", Field, 4}, + {"SysProcAttr.Unshareflags", Field, 7}, + {"SysProcAttr.UseCgroupFD", Field, 20}, + {"SysProcIDMap", Type, 4}, + {"SysProcIDMap.ContainerID", Field, 4}, + {"SysProcIDMap.HostID", Field, 4}, + {"SysProcIDMap.Size", Field, 4}, + {"Syscall", Func, 0}, + {"Syscall12", Func, 0}, + {"Syscall15", Func, 0}, + {"Syscall18", Func, 12}, + {"Syscall6", Func, 0}, + {"Syscall9", Func, 0}, + {"SyscallN", Func, 18}, + {"Sysctl", Func, 0}, + {"SysctlUint32", Func, 0}, + {"Sysctlnode", Type, 2}, + {"Sysctlnode.Flags", Field, 2}, + {"Sysctlnode.Name", Field, 2}, + {"Sysctlnode.Num", Field, 2}, + {"Sysctlnode.Un", Field, 2}, + {"Sysctlnode.Ver", Field, 2}, + {"Sysctlnode.X__rsvd", Field, 2}, + {"Sysctlnode.X_sysctl_desc", Field, 2}, + {"Sysctlnode.X_sysctl_func", Field, 2}, + {"Sysctlnode.X_sysctl_parent", Field, 2}, + {"Sysctlnode.X_sysctl_size", Field, 2}, + {"Sysinfo", Func, 0}, + {"Sysinfo_t", Type, 0}, + {"Sysinfo_t.Bufferram", Field, 0}, + {"Sysinfo_t.Freehigh", Field, 0}, + {"Sysinfo_t.Freeram", Field, 0}, + {"Sysinfo_t.Freeswap", Field, 0}, + {"Sysinfo_t.Loads", Field, 0}, + {"Sysinfo_t.Pad", Field, 0}, + {"Sysinfo_t.Pad_cgo_0", Field, 0}, + {"Sysinfo_t.Pad_cgo_1", Field, 0}, + {"Sysinfo_t.Procs", Field, 0}, + {"Sysinfo_t.Sharedram", Field, 0}, + {"Sysinfo_t.Totalhigh", Field, 0}, + {"Sysinfo_t.Totalram", Field, 0}, + {"Sysinfo_t.Totalswap", Field, 0}, + {"Sysinfo_t.Unit", Field, 0}, + {"Sysinfo_t.Uptime", Field, 0}, + {"Sysinfo_t.X_f", Field, 0}, + {"Systemtime", Type, 0}, + {"Systemtime.Day", Field, 0}, + {"Systemtime.DayOfWeek", Field, 0}, + {"Systemtime.Hour", Field, 0}, + {"Systemtime.Milliseconds", Field, 0}, + {"Systemtime.Minute", Field, 0}, + {"Systemtime.Month", Field, 0}, + {"Systemtime.Second", Field, 0}, + {"Systemtime.Year", Field, 0}, + {"TCGETS", Const, 0}, + {"TCIFLUSH", Const, 1}, + {"TCIOFLUSH", Const, 1}, + {"TCOFLUSH", Const, 1}, + {"TCPInfo", 
Type, 1}, + {"TCPInfo.Advmss", Field, 1}, + {"TCPInfo.Ato", Field, 1}, + {"TCPInfo.Backoff", Field, 1}, + {"TCPInfo.Ca_state", Field, 1}, + {"TCPInfo.Fackets", Field, 1}, + {"TCPInfo.Last_ack_recv", Field, 1}, + {"TCPInfo.Last_ack_sent", Field, 1}, + {"TCPInfo.Last_data_recv", Field, 1}, + {"TCPInfo.Last_data_sent", Field, 1}, + {"TCPInfo.Lost", Field, 1}, + {"TCPInfo.Options", Field, 1}, + {"TCPInfo.Pad_cgo_0", Field, 1}, + {"TCPInfo.Pmtu", Field, 1}, + {"TCPInfo.Probes", Field, 1}, + {"TCPInfo.Rcv_mss", Field, 1}, + {"TCPInfo.Rcv_rtt", Field, 1}, + {"TCPInfo.Rcv_space", Field, 1}, + {"TCPInfo.Rcv_ssthresh", Field, 1}, + {"TCPInfo.Reordering", Field, 1}, + {"TCPInfo.Retrans", Field, 1}, + {"TCPInfo.Retransmits", Field, 1}, + {"TCPInfo.Rto", Field, 1}, + {"TCPInfo.Rtt", Field, 1}, + {"TCPInfo.Rttvar", Field, 1}, + {"TCPInfo.Sacked", Field, 1}, + {"TCPInfo.Snd_cwnd", Field, 1}, + {"TCPInfo.Snd_mss", Field, 1}, + {"TCPInfo.Snd_ssthresh", Field, 1}, + {"TCPInfo.State", Field, 1}, + {"TCPInfo.Total_retrans", Field, 1}, + {"TCPInfo.Unacked", Field, 1}, + {"TCPKeepalive", Type, 3}, + {"TCPKeepalive.Interval", Field, 3}, + {"TCPKeepalive.OnOff", Field, 3}, + {"TCPKeepalive.Time", Field, 3}, + {"TCP_CA_NAME_MAX", Const, 0}, + {"TCP_CONGCTL", Const, 1}, + {"TCP_CONGESTION", Const, 0}, + {"TCP_CONNECTIONTIMEOUT", Const, 0}, + {"TCP_CORK", Const, 0}, + {"TCP_DEFER_ACCEPT", Const, 0}, + {"TCP_ENABLE_ECN", Const, 16}, + {"TCP_INFO", Const, 0}, + {"TCP_KEEPALIVE", Const, 0}, + {"TCP_KEEPCNT", Const, 0}, + {"TCP_KEEPIDLE", Const, 0}, + {"TCP_KEEPINIT", Const, 1}, + {"TCP_KEEPINTVL", Const, 0}, + {"TCP_LINGER2", Const, 0}, + {"TCP_MAXBURST", Const, 0}, + {"TCP_MAXHLEN", Const, 0}, + {"TCP_MAXOLEN", Const, 0}, + {"TCP_MAXSEG", Const, 0}, + {"TCP_MAXWIN", Const, 0}, + {"TCP_MAX_SACK", Const, 0}, + {"TCP_MAX_WINSHIFT", Const, 0}, + {"TCP_MD5SIG", Const, 0}, + {"TCP_MD5SIG_MAXKEYLEN", Const, 0}, + {"TCP_MINMSS", Const, 0}, + {"TCP_MINMSSOVERLOAD", Const, 0}, + {"TCP_MSS", Const, 0}, + {"TCP_NODELAY", Const, 0}, + {"TCP_NOOPT", Const, 0}, + {"TCP_NOPUSH", Const, 0}, + {"TCP_NOTSENT_LOWAT", Const, 16}, + {"TCP_NSTATES", Const, 1}, + {"TCP_QUICKACK", Const, 0}, + {"TCP_RXT_CONNDROPTIME", Const, 0}, + {"TCP_RXT_FINDROP", Const, 0}, + {"TCP_SACK_ENABLE", Const, 1}, + {"TCP_SENDMOREACKS", Const, 16}, + {"TCP_SYNCNT", Const, 0}, + {"TCP_VENDOR", Const, 3}, + {"TCP_WINDOW_CLAMP", Const, 0}, + {"TCSAFLUSH", Const, 1}, + {"TCSETS", Const, 0}, + {"TF_DISCONNECT", Const, 0}, + {"TF_REUSE_SOCKET", Const, 0}, + {"TF_USE_DEFAULT_WORKER", Const, 0}, + {"TF_USE_KERNEL_APC", Const, 0}, + {"TF_USE_SYSTEM_THREAD", Const, 0}, + {"TF_WRITE_BEHIND", Const, 0}, + {"TH32CS_INHERIT", Const, 4}, + {"TH32CS_SNAPALL", Const, 4}, + {"TH32CS_SNAPHEAPLIST", Const, 4}, + {"TH32CS_SNAPMODULE", Const, 4}, + {"TH32CS_SNAPMODULE32", Const, 4}, + {"TH32CS_SNAPPROCESS", Const, 4}, + {"TH32CS_SNAPTHREAD", Const, 4}, + {"TIME_ZONE_ID_DAYLIGHT", Const, 0}, + {"TIME_ZONE_ID_STANDARD", Const, 0}, + {"TIME_ZONE_ID_UNKNOWN", Const, 0}, + {"TIOCCBRK", Const, 0}, + {"TIOCCDTR", Const, 0}, + {"TIOCCONS", Const, 0}, + {"TIOCDCDTIMESTAMP", Const, 0}, + {"TIOCDRAIN", Const, 0}, + {"TIOCDSIMICROCODE", Const, 0}, + {"TIOCEXCL", Const, 0}, + {"TIOCEXT", Const, 0}, + {"TIOCFLAG_CDTRCTS", Const, 1}, + {"TIOCFLAG_CLOCAL", Const, 1}, + {"TIOCFLAG_CRTSCTS", Const, 1}, + {"TIOCFLAG_MDMBUF", Const, 1}, + {"TIOCFLAG_PPS", Const, 1}, + {"TIOCFLAG_SOFTCAR", Const, 1}, + {"TIOCFLUSH", Const, 0}, + {"TIOCGDEV", Const, 0}, + {"TIOCGDRAINWAIT", Const, 0}, + {"TIOCGETA", Const, 
0}, + {"TIOCGETD", Const, 0}, + {"TIOCGFLAGS", Const, 1}, + {"TIOCGICOUNT", Const, 0}, + {"TIOCGLCKTRMIOS", Const, 0}, + {"TIOCGLINED", Const, 1}, + {"TIOCGPGRP", Const, 0}, + {"TIOCGPTN", Const, 0}, + {"TIOCGQSIZE", Const, 1}, + {"TIOCGRANTPT", Const, 1}, + {"TIOCGRS485", Const, 0}, + {"TIOCGSERIAL", Const, 0}, + {"TIOCGSID", Const, 0}, + {"TIOCGSIZE", Const, 1}, + {"TIOCGSOFTCAR", Const, 0}, + {"TIOCGTSTAMP", Const, 1}, + {"TIOCGWINSZ", Const, 0}, + {"TIOCINQ", Const, 0}, + {"TIOCIXOFF", Const, 0}, + {"TIOCIXON", Const, 0}, + {"TIOCLINUX", Const, 0}, + {"TIOCMBIC", Const, 0}, + {"TIOCMBIS", Const, 0}, + {"TIOCMGDTRWAIT", Const, 0}, + {"TIOCMGET", Const, 0}, + {"TIOCMIWAIT", Const, 0}, + {"TIOCMODG", Const, 0}, + {"TIOCMODS", Const, 0}, + {"TIOCMSDTRWAIT", Const, 0}, + {"TIOCMSET", Const, 0}, + {"TIOCM_CAR", Const, 0}, + {"TIOCM_CD", Const, 0}, + {"TIOCM_CTS", Const, 0}, + {"TIOCM_DCD", Const, 0}, + {"TIOCM_DSR", Const, 0}, + {"TIOCM_DTR", Const, 0}, + {"TIOCM_LE", Const, 0}, + {"TIOCM_RI", Const, 0}, + {"TIOCM_RNG", Const, 0}, + {"TIOCM_RTS", Const, 0}, + {"TIOCM_SR", Const, 0}, + {"TIOCM_ST", Const, 0}, + {"TIOCNOTTY", Const, 0}, + {"TIOCNXCL", Const, 0}, + {"TIOCOUTQ", Const, 0}, + {"TIOCPKT", Const, 0}, + {"TIOCPKT_DATA", Const, 0}, + {"TIOCPKT_DOSTOP", Const, 0}, + {"TIOCPKT_FLUSHREAD", Const, 0}, + {"TIOCPKT_FLUSHWRITE", Const, 0}, + {"TIOCPKT_IOCTL", Const, 0}, + {"TIOCPKT_NOSTOP", Const, 0}, + {"TIOCPKT_START", Const, 0}, + {"TIOCPKT_STOP", Const, 0}, + {"TIOCPTMASTER", Const, 0}, + {"TIOCPTMGET", Const, 1}, + {"TIOCPTSNAME", Const, 1}, + {"TIOCPTYGNAME", Const, 0}, + {"TIOCPTYGRANT", Const, 0}, + {"TIOCPTYUNLK", Const, 0}, + {"TIOCRCVFRAME", Const, 1}, + {"TIOCREMOTE", Const, 0}, + {"TIOCSBRK", Const, 0}, + {"TIOCSCONS", Const, 0}, + {"TIOCSCTTY", Const, 0}, + {"TIOCSDRAINWAIT", Const, 0}, + {"TIOCSDTR", Const, 0}, + {"TIOCSERCONFIG", Const, 0}, + {"TIOCSERGETLSR", Const, 0}, + {"TIOCSERGETMULTI", Const, 0}, + {"TIOCSERGSTRUCT", Const, 0}, + {"TIOCSERGWILD", Const, 0}, + {"TIOCSERSETMULTI", Const, 0}, + {"TIOCSERSWILD", Const, 0}, + {"TIOCSER_TEMT", Const, 0}, + {"TIOCSETA", Const, 0}, + {"TIOCSETAF", Const, 0}, + {"TIOCSETAW", Const, 0}, + {"TIOCSETD", Const, 0}, + {"TIOCSFLAGS", Const, 1}, + {"TIOCSIG", Const, 0}, + {"TIOCSLCKTRMIOS", Const, 0}, + {"TIOCSLINED", Const, 1}, + {"TIOCSPGRP", Const, 0}, + {"TIOCSPTLCK", Const, 0}, + {"TIOCSQSIZE", Const, 1}, + {"TIOCSRS485", Const, 0}, + {"TIOCSSERIAL", Const, 0}, + {"TIOCSSIZE", Const, 1}, + {"TIOCSSOFTCAR", Const, 0}, + {"TIOCSTART", Const, 0}, + {"TIOCSTAT", Const, 0}, + {"TIOCSTI", Const, 0}, + {"TIOCSTOP", Const, 0}, + {"TIOCSTSTAMP", Const, 1}, + {"TIOCSWINSZ", Const, 0}, + {"TIOCTIMESTAMP", Const, 0}, + {"TIOCUCNTL", Const, 0}, + {"TIOCVHANGUP", Const, 0}, + {"TIOCXMTFRAME", Const, 1}, + {"TOKEN_ADJUST_DEFAULT", Const, 0}, + {"TOKEN_ADJUST_GROUPS", Const, 0}, + {"TOKEN_ADJUST_PRIVILEGES", Const, 0}, + {"TOKEN_ADJUST_SESSIONID", Const, 11}, + {"TOKEN_ALL_ACCESS", Const, 0}, + {"TOKEN_ASSIGN_PRIMARY", Const, 0}, + {"TOKEN_DUPLICATE", Const, 0}, + {"TOKEN_EXECUTE", Const, 0}, + {"TOKEN_IMPERSONATE", Const, 0}, + {"TOKEN_QUERY", Const, 0}, + {"TOKEN_QUERY_SOURCE", Const, 0}, + {"TOKEN_READ", Const, 0}, + {"TOKEN_WRITE", Const, 0}, + {"TOSTOP", Const, 0}, + {"TRUNCATE_EXISTING", Const, 0}, + {"TUNATTACHFILTER", Const, 0}, + {"TUNDETACHFILTER", Const, 0}, + {"TUNGETFEATURES", Const, 0}, + {"TUNGETIFF", Const, 0}, + {"TUNGETSNDBUF", Const, 0}, + {"TUNGETVNETHDRSZ", Const, 0}, + {"TUNSETDEBUG", Const, 0}, + {"TUNSETGROUP", Const, 
0}, + {"TUNSETIFF", Const, 0}, + {"TUNSETLINK", Const, 0}, + {"TUNSETNOCSUM", Const, 0}, + {"TUNSETOFFLOAD", Const, 0}, + {"TUNSETOWNER", Const, 0}, + {"TUNSETPERSIST", Const, 0}, + {"TUNSETSNDBUF", Const, 0}, + {"TUNSETTXFILTER", Const, 0}, + {"TUNSETVNETHDRSZ", Const, 0}, + {"Tee", Func, 0}, + {"TerminateProcess", Func, 0}, + {"Termios", Type, 0}, + {"Termios.Cc", Field, 0}, + {"Termios.Cflag", Field, 0}, + {"Termios.Iflag", Field, 0}, + {"Termios.Ispeed", Field, 0}, + {"Termios.Lflag", Field, 0}, + {"Termios.Line", Field, 0}, + {"Termios.Oflag", Field, 0}, + {"Termios.Ospeed", Field, 0}, + {"Termios.Pad_cgo_0", Field, 0}, + {"Tgkill", Func, 0}, + {"Time", Func, 0}, + {"Time_t", Type, 0}, + {"Times", Func, 0}, + {"Timespec", Type, 0}, + {"Timespec.Nsec", Field, 0}, + {"Timespec.Pad_cgo_0", Field, 2}, + {"Timespec.Sec", Field, 0}, + {"TimespecToNsec", Func, 0}, + {"Timeval", Type, 0}, + {"Timeval.Pad_cgo_0", Field, 0}, + {"Timeval.Sec", Field, 0}, + {"Timeval.Usec", Field, 0}, + {"Timeval32", Type, 0}, + {"Timeval32.Sec", Field, 0}, + {"Timeval32.Usec", Field, 0}, + {"TimevalToNsec", Func, 0}, + {"Timex", Type, 0}, + {"Timex.Calcnt", Field, 0}, + {"Timex.Constant", Field, 0}, + {"Timex.Errcnt", Field, 0}, + {"Timex.Esterror", Field, 0}, + {"Timex.Freq", Field, 0}, + {"Timex.Jitcnt", Field, 0}, + {"Timex.Jitter", Field, 0}, + {"Timex.Maxerror", Field, 0}, + {"Timex.Modes", Field, 0}, + {"Timex.Offset", Field, 0}, + {"Timex.Pad_cgo_0", Field, 0}, + {"Timex.Pad_cgo_1", Field, 0}, + {"Timex.Pad_cgo_2", Field, 0}, + {"Timex.Pad_cgo_3", Field, 0}, + {"Timex.Ppsfreq", Field, 0}, + {"Timex.Precision", Field, 0}, + {"Timex.Shift", Field, 0}, + {"Timex.Stabil", Field, 0}, + {"Timex.Status", Field, 0}, + {"Timex.Stbcnt", Field, 0}, + {"Timex.Tai", Field, 0}, + {"Timex.Tick", Field, 0}, + {"Timex.Time", Field, 0}, + {"Timex.Tolerance", Field, 0}, + {"Timezoneinformation", Type, 0}, + {"Timezoneinformation.Bias", Field, 0}, + {"Timezoneinformation.DaylightBias", Field, 0}, + {"Timezoneinformation.DaylightDate", Field, 0}, + {"Timezoneinformation.DaylightName", Field, 0}, + {"Timezoneinformation.StandardBias", Field, 0}, + {"Timezoneinformation.StandardDate", Field, 0}, + {"Timezoneinformation.StandardName", Field, 0}, + {"Tms", Type, 0}, + {"Tms.Cstime", Field, 0}, + {"Tms.Cutime", Field, 0}, + {"Tms.Stime", Field, 0}, + {"Tms.Utime", Field, 0}, + {"Token", Type, 0}, + {"TokenAccessInformation", Const, 0}, + {"TokenAuditPolicy", Const, 0}, + {"TokenDefaultDacl", Const, 0}, + {"TokenElevation", Const, 0}, + {"TokenElevationType", Const, 0}, + {"TokenGroups", Const, 0}, + {"TokenGroupsAndPrivileges", Const, 0}, + {"TokenHasRestrictions", Const, 0}, + {"TokenImpersonationLevel", Const, 0}, + {"TokenIntegrityLevel", Const, 0}, + {"TokenLinkedToken", Const, 0}, + {"TokenLogonSid", Const, 0}, + {"TokenMandatoryPolicy", Const, 0}, + {"TokenOrigin", Const, 0}, + {"TokenOwner", Const, 0}, + {"TokenPrimaryGroup", Const, 0}, + {"TokenPrivileges", Const, 0}, + {"TokenRestrictedSids", Const, 0}, + {"TokenSandBoxInert", Const, 0}, + {"TokenSessionId", Const, 0}, + {"TokenSessionReference", Const, 0}, + {"TokenSource", Const, 0}, + {"TokenStatistics", Const, 0}, + {"TokenType", Const, 0}, + {"TokenUIAccess", Const, 0}, + {"TokenUser", Const, 0}, + {"TokenVirtualizationAllowed", Const, 0}, + {"TokenVirtualizationEnabled", Const, 0}, + {"Tokenprimarygroup", Type, 0}, + {"Tokenprimarygroup.PrimaryGroup", Field, 0}, + {"Tokenuser", Type, 0}, + {"Tokenuser.User", Field, 0}, + {"TranslateAccountName", Func, 0}, + 
{"TranslateName", Func, 0}, + {"TransmitFile", Func, 0}, + {"TransmitFileBuffers", Type, 0}, + {"TransmitFileBuffers.Head", Field, 0}, + {"TransmitFileBuffers.HeadLength", Field, 0}, + {"TransmitFileBuffers.Tail", Field, 0}, + {"TransmitFileBuffers.TailLength", Field, 0}, + {"Truncate", Func, 0}, + {"UNIX_PATH_MAX", Const, 12}, + {"USAGE_MATCH_TYPE_AND", Const, 0}, + {"USAGE_MATCH_TYPE_OR", Const, 0}, + {"UTF16FromString", Func, 1}, + {"UTF16PtrFromString", Func, 1}, + {"UTF16ToString", Func, 0}, + {"Ucred", Type, 0}, + {"Ucred.Gid", Field, 0}, + {"Ucred.Pid", Field, 0}, + {"Ucred.Uid", Field, 0}, + {"Umask", Func, 0}, + {"Uname", Func, 0}, + {"Undelete", Func, 0}, + {"UnixCredentials", Func, 0}, + {"UnixRights", Func, 0}, + {"Unlink", Func, 0}, + {"Unlinkat", Func, 0}, + {"UnmapViewOfFile", Func, 0}, + {"Unmount", Func, 0}, + {"Unsetenv", Func, 4}, + {"Unshare", Func, 0}, + {"UserInfo10", Type, 0}, + {"UserInfo10.Comment", Field, 0}, + {"UserInfo10.FullName", Field, 0}, + {"UserInfo10.Name", Field, 0}, + {"UserInfo10.UsrComment", Field, 0}, + {"Ustat", Func, 0}, + {"Ustat_t", Type, 0}, + {"Ustat_t.Fname", Field, 0}, + {"Ustat_t.Fpack", Field, 0}, + {"Ustat_t.Pad_cgo_0", Field, 0}, + {"Ustat_t.Pad_cgo_1", Field, 0}, + {"Ustat_t.Tfree", Field, 0}, + {"Ustat_t.Tinode", Field, 0}, + {"Utimbuf", Type, 0}, + {"Utimbuf.Actime", Field, 0}, + {"Utimbuf.Modtime", Field, 0}, + {"Utime", Func, 0}, + {"Utimes", Func, 0}, + {"UtimesNano", Func, 1}, + {"Utsname", Type, 0}, + {"Utsname.Domainname", Field, 0}, + {"Utsname.Machine", Field, 0}, + {"Utsname.Nodename", Field, 0}, + {"Utsname.Release", Field, 0}, + {"Utsname.Sysname", Field, 0}, + {"Utsname.Version", Field, 0}, + {"VDISCARD", Const, 0}, + {"VDSUSP", Const, 1}, + {"VEOF", Const, 0}, + {"VEOL", Const, 0}, + {"VEOL2", Const, 0}, + {"VERASE", Const, 0}, + {"VERASE2", Const, 1}, + {"VINTR", Const, 0}, + {"VKILL", Const, 0}, + {"VLNEXT", Const, 0}, + {"VMIN", Const, 0}, + {"VQUIT", Const, 0}, + {"VREPRINT", Const, 0}, + {"VSTART", Const, 0}, + {"VSTATUS", Const, 1}, + {"VSTOP", Const, 0}, + {"VSUSP", Const, 0}, + {"VSWTC", Const, 0}, + {"VT0", Const, 1}, + {"VT1", Const, 1}, + {"VTDLY", Const, 1}, + {"VTIME", Const, 0}, + {"VWERASE", Const, 0}, + {"VirtualLock", Func, 0}, + {"VirtualUnlock", Func, 0}, + {"WAIT_ABANDONED", Const, 0}, + {"WAIT_FAILED", Const, 0}, + {"WAIT_OBJECT_0", Const, 0}, + {"WAIT_TIMEOUT", Const, 0}, + {"WALL", Const, 0}, + {"WALLSIG", Const, 1}, + {"WALTSIG", Const, 1}, + {"WCLONE", Const, 0}, + {"WCONTINUED", Const, 0}, + {"WCOREFLAG", Const, 0}, + {"WEXITED", Const, 0}, + {"WLINUXCLONE", Const, 0}, + {"WNOHANG", Const, 0}, + {"WNOTHREAD", Const, 0}, + {"WNOWAIT", Const, 0}, + {"WNOZOMBIE", Const, 1}, + {"WOPTSCHECKED", Const, 1}, + {"WORDSIZE", Const, 0}, + {"WSABuf", Type, 0}, + {"WSABuf.Buf", Field, 0}, + {"WSABuf.Len", Field, 0}, + {"WSACleanup", Func, 0}, + {"WSADESCRIPTION_LEN", Const, 0}, + {"WSAData", Type, 0}, + {"WSAData.Description", Field, 0}, + {"WSAData.HighVersion", Field, 0}, + {"WSAData.MaxSockets", Field, 0}, + {"WSAData.MaxUdpDg", Field, 0}, + {"WSAData.SystemStatus", Field, 0}, + {"WSAData.VendorInfo", Field, 0}, + {"WSAData.Version", Field, 0}, + {"WSAEACCES", Const, 2}, + {"WSAECONNABORTED", Const, 9}, + {"WSAECONNRESET", Const, 3}, + {"WSAEnumProtocols", Func, 2}, + {"WSAID_CONNECTEX", Var, 1}, + {"WSAIoctl", Func, 0}, + {"WSAPROTOCOL_LEN", Const, 2}, + {"WSAProtocolChain", Type, 2}, + {"WSAProtocolChain.ChainEntries", Field, 2}, + {"WSAProtocolChain.ChainLen", Field, 2}, + {"WSAProtocolInfo", Type, 2}, 
+ {"WSAProtocolInfo.AddressFamily", Field, 2}, + {"WSAProtocolInfo.CatalogEntryId", Field, 2}, + {"WSAProtocolInfo.MaxSockAddr", Field, 2}, + {"WSAProtocolInfo.MessageSize", Field, 2}, + {"WSAProtocolInfo.MinSockAddr", Field, 2}, + {"WSAProtocolInfo.NetworkByteOrder", Field, 2}, + {"WSAProtocolInfo.Protocol", Field, 2}, + {"WSAProtocolInfo.ProtocolChain", Field, 2}, + {"WSAProtocolInfo.ProtocolMaxOffset", Field, 2}, + {"WSAProtocolInfo.ProtocolName", Field, 2}, + {"WSAProtocolInfo.ProviderFlags", Field, 2}, + {"WSAProtocolInfo.ProviderId", Field, 2}, + {"WSAProtocolInfo.ProviderReserved", Field, 2}, + {"WSAProtocolInfo.SecurityScheme", Field, 2}, + {"WSAProtocolInfo.ServiceFlags1", Field, 2}, + {"WSAProtocolInfo.ServiceFlags2", Field, 2}, + {"WSAProtocolInfo.ServiceFlags3", Field, 2}, + {"WSAProtocolInfo.ServiceFlags4", Field, 2}, + {"WSAProtocolInfo.SocketType", Field, 2}, + {"WSAProtocolInfo.Version", Field, 2}, + {"WSARecv", Func, 0}, + {"WSARecvFrom", Func, 0}, + {"WSASYS_STATUS_LEN", Const, 0}, + {"WSASend", Func, 0}, + {"WSASendTo", Func, 0}, + {"WSASendto", Func, 0}, + {"WSAStartup", Func, 0}, + {"WSTOPPED", Const, 0}, + {"WTRAPPED", Const, 1}, + {"WUNTRACED", Const, 0}, + {"Wait4", Func, 0}, + {"WaitForSingleObject", Func, 0}, + {"WaitStatus", Type, 0}, + {"WaitStatus.ExitCode", Field, 0}, + {"Win32FileAttributeData", Type, 0}, + {"Win32FileAttributeData.CreationTime", Field, 0}, + {"Win32FileAttributeData.FileAttributes", Field, 0}, + {"Win32FileAttributeData.FileSizeHigh", Field, 0}, + {"Win32FileAttributeData.FileSizeLow", Field, 0}, + {"Win32FileAttributeData.LastAccessTime", Field, 0}, + {"Win32FileAttributeData.LastWriteTime", Field, 0}, + {"Win32finddata", Type, 0}, + {"Win32finddata.AlternateFileName", Field, 0}, + {"Win32finddata.CreationTime", Field, 0}, + {"Win32finddata.FileAttributes", Field, 0}, + {"Win32finddata.FileName", Field, 0}, + {"Win32finddata.FileSizeHigh", Field, 0}, + {"Win32finddata.FileSizeLow", Field, 0}, + {"Win32finddata.LastAccessTime", Field, 0}, + {"Win32finddata.LastWriteTime", Field, 0}, + {"Win32finddata.Reserved0", Field, 0}, + {"Win32finddata.Reserved1", Field, 0}, + {"Write", Func, 0}, + {"WriteConsole", Func, 1}, + {"WriteFile", Func, 0}, + {"X509_ASN_ENCODING", Const, 0}, + {"XCASE", Const, 0}, + {"XP1_CONNECTIONLESS", Const, 2}, + {"XP1_CONNECT_DATA", Const, 2}, + {"XP1_DISCONNECT_DATA", Const, 2}, + {"XP1_EXPEDITED_DATA", Const, 2}, + {"XP1_GRACEFUL_CLOSE", Const, 2}, + {"XP1_GUARANTEED_DELIVERY", Const, 2}, + {"XP1_GUARANTEED_ORDER", Const, 2}, + {"XP1_IFS_HANDLES", Const, 2}, + {"XP1_MESSAGE_ORIENTED", Const, 2}, + {"XP1_MULTIPOINT_CONTROL_PLANE", Const, 2}, + {"XP1_MULTIPOINT_DATA_PLANE", Const, 2}, + {"XP1_PARTIAL_MESSAGE", Const, 2}, + {"XP1_PSEUDO_STREAM", Const, 2}, + {"XP1_QOS_SUPPORTED", Const, 2}, + {"XP1_SAN_SUPPORT_SDP", Const, 2}, + {"XP1_SUPPORT_BROADCAST", Const, 2}, + {"XP1_SUPPORT_MULTIPOINT", Const, 2}, + {"XP1_UNI_RECV", Const, 2}, + {"XP1_UNI_SEND", Const, 2}, + }, + "syscall/js": { + {"CopyBytesToGo", Func, 0}, + {"CopyBytesToJS", Func, 0}, + {"Error", Type, 0}, + {"Func", Type, 0}, + {"FuncOf", Func, 0}, + {"Global", Func, 0}, + {"Null", Func, 0}, + {"Type", Type, 0}, + {"TypeBoolean", Const, 0}, + {"TypeFunction", Const, 0}, + {"TypeNull", Const, 0}, + {"TypeNumber", Const, 0}, + {"TypeObject", Const, 0}, + {"TypeString", Const, 0}, + {"TypeSymbol", Const, 0}, + {"TypeUndefined", Const, 0}, + {"Undefined", Func, 0}, + {"Value", Type, 0}, + {"ValueError", Type, 0}, + {"ValueOf", Func, 0}, + }, + "testing": { + 
{"(*B).Cleanup", Method, 14}, + {"(*B).Elapsed", Method, 20}, + {"(*B).Error", Method, 0}, + {"(*B).Errorf", Method, 0}, + {"(*B).Fail", Method, 0}, + {"(*B).FailNow", Method, 0}, + {"(*B).Failed", Method, 0}, + {"(*B).Fatal", Method, 0}, + {"(*B).Fatalf", Method, 0}, + {"(*B).Helper", Method, 9}, + {"(*B).Log", Method, 0}, + {"(*B).Logf", Method, 0}, + {"(*B).Name", Method, 8}, + {"(*B).ReportAllocs", Method, 1}, + {"(*B).ReportMetric", Method, 13}, + {"(*B).ResetTimer", Method, 0}, + {"(*B).Run", Method, 7}, + {"(*B).RunParallel", Method, 3}, + {"(*B).SetBytes", Method, 0}, + {"(*B).SetParallelism", Method, 3}, + {"(*B).Setenv", Method, 17}, + {"(*B).Skip", Method, 1}, + {"(*B).SkipNow", Method, 1}, + {"(*B).Skipf", Method, 1}, + {"(*B).Skipped", Method, 1}, + {"(*B).StartTimer", Method, 0}, + {"(*B).StopTimer", Method, 0}, + {"(*B).TempDir", Method, 15}, + {"(*F).Add", Method, 18}, + {"(*F).Cleanup", Method, 18}, + {"(*F).Error", Method, 18}, + {"(*F).Errorf", Method, 18}, + {"(*F).Fail", Method, 18}, + {"(*F).FailNow", Method, 18}, + {"(*F).Failed", Method, 18}, + {"(*F).Fatal", Method, 18}, + {"(*F).Fatalf", Method, 18}, + {"(*F).Fuzz", Method, 18}, + {"(*F).Helper", Method, 18}, + {"(*F).Log", Method, 18}, + {"(*F).Logf", Method, 18}, + {"(*F).Name", Method, 18}, + {"(*F).Setenv", Method, 18}, + {"(*F).Skip", Method, 18}, + {"(*F).SkipNow", Method, 18}, + {"(*F).Skipf", Method, 18}, + {"(*F).Skipped", Method, 18}, + {"(*F).TempDir", Method, 18}, + {"(*M).Run", Method, 4}, + {"(*PB).Next", Method, 3}, + {"(*T).Cleanup", Method, 14}, + {"(*T).Deadline", Method, 15}, + {"(*T).Error", Method, 0}, + {"(*T).Errorf", Method, 0}, + {"(*T).Fail", Method, 0}, + {"(*T).FailNow", Method, 0}, + {"(*T).Failed", Method, 0}, + {"(*T).Fatal", Method, 0}, + {"(*T).Fatalf", Method, 0}, + {"(*T).Helper", Method, 9}, + {"(*T).Log", Method, 0}, + {"(*T).Logf", Method, 0}, + {"(*T).Name", Method, 8}, + {"(*T).Parallel", Method, 0}, + {"(*T).Run", Method, 7}, + {"(*T).Setenv", Method, 17}, + {"(*T).Skip", Method, 1}, + {"(*T).SkipNow", Method, 1}, + {"(*T).Skipf", Method, 1}, + {"(*T).Skipped", Method, 1}, + {"(*T).TempDir", Method, 15}, + {"(BenchmarkResult).AllocedBytesPerOp", Method, 1}, + {"(BenchmarkResult).AllocsPerOp", Method, 1}, + {"(BenchmarkResult).MemString", Method, 1}, + {"(BenchmarkResult).NsPerOp", Method, 0}, + {"(BenchmarkResult).String", Method, 0}, + {"AllocsPerRun", Func, 1}, + {"B", Type, 0}, + {"B.N", Field, 0}, + {"Benchmark", Func, 0}, + {"BenchmarkResult", Type, 0}, + {"BenchmarkResult.Bytes", Field, 0}, + {"BenchmarkResult.Extra", Field, 13}, + {"BenchmarkResult.MemAllocs", Field, 1}, + {"BenchmarkResult.MemBytes", Field, 1}, + {"BenchmarkResult.N", Field, 0}, + {"BenchmarkResult.T", Field, 0}, + {"Cover", Type, 2}, + {"Cover.Blocks", Field, 2}, + {"Cover.Counters", Field, 2}, + {"Cover.CoveredPackages", Field, 2}, + {"Cover.Mode", Field, 2}, + {"CoverBlock", Type, 2}, + {"CoverBlock.Col0", Field, 2}, + {"CoverBlock.Col1", Field, 2}, + {"CoverBlock.Line0", Field, 2}, + {"CoverBlock.Line1", Field, 2}, + {"CoverBlock.Stmts", Field, 2}, + {"CoverMode", Func, 8}, + {"Coverage", Func, 4}, + {"F", Type, 18}, + {"Init", Func, 13}, + {"InternalBenchmark", Type, 0}, + {"InternalBenchmark.F", Field, 0}, + {"InternalBenchmark.Name", Field, 0}, + {"InternalExample", Type, 0}, + {"InternalExample.F", Field, 0}, + {"InternalExample.Name", Field, 0}, + {"InternalExample.Output", Field, 0}, + {"InternalExample.Unordered", Field, 7}, + {"InternalFuzzTarget", Type, 18}, + {"InternalFuzzTarget.Fn", 
Field, 18}, + {"InternalFuzzTarget.Name", Field, 18}, + {"InternalTest", Type, 0}, + {"InternalTest.F", Field, 0}, + {"InternalTest.Name", Field, 0}, + {"M", Type, 4}, + {"Main", Func, 0}, + {"MainStart", Func, 4}, + {"PB", Type, 3}, + {"RegisterCover", Func, 2}, + {"RunBenchmarks", Func, 0}, + {"RunExamples", Func, 0}, + {"RunTests", Func, 0}, + {"Short", Func, 0}, + {"T", Type, 0}, + {"TB", Type, 2}, + {"Testing", Func, 21}, + {"Verbose", Func, 1}, + }, + "testing/fstest": { + {"(MapFS).Glob", Method, 16}, + {"(MapFS).Open", Method, 16}, + {"(MapFS).ReadDir", Method, 16}, + {"(MapFS).ReadFile", Method, 16}, + {"(MapFS).Stat", Method, 16}, + {"(MapFS).Sub", Method, 16}, + {"MapFS", Type, 16}, + {"MapFile", Type, 16}, + {"MapFile.Data", Field, 16}, + {"MapFile.ModTime", Field, 16}, + {"MapFile.Mode", Field, 16}, + {"MapFile.Sys", Field, 16}, + {"TestFS", Func, 16}, + }, + "testing/iotest": { + {"DataErrReader", Func, 0}, + {"ErrReader", Func, 16}, + {"ErrTimeout", Var, 0}, + {"HalfReader", Func, 0}, + {"NewReadLogger", Func, 0}, + {"NewWriteLogger", Func, 0}, + {"OneByteReader", Func, 0}, + {"TestReader", Func, 16}, + {"TimeoutReader", Func, 0}, + {"TruncateWriter", Func, 0}, + }, + "testing/quick": { + {"(*CheckEqualError).Error", Method, 0}, + {"(*CheckError).Error", Method, 0}, + {"(SetupError).Error", Method, 0}, + {"Check", Func, 0}, + {"CheckEqual", Func, 0}, + {"CheckEqualError", Type, 0}, + {"CheckEqualError.CheckError", Field, 0}, + {"CheckEqualError.Out1", Field, 0}, + {"CheckEqualError.Out2", Field, 0}, + {"CheckError", Type, 0}, + {"CheckError.Count", Field, 0}, + {"CheckError.In", Field, 0}, + {"Config", Type, 0}, + {"Config.MaxCount", Field, 0}, + {"Config.MaxCountScale", Field, 0}, + {"Config.Rand", Field, 0}, + {"Config.Values", Field, 0}, + {"Generator", Type, 0}, + {"SetupError", Type, 0}, + {"Value", Func, 0}, + }, + "testing/slogtest": { + {"Run", Func, 22}, + {"TestHandler", Func, 21}, + }, + "text/scanner": { + {"(*Position).IsValid", Method, 0}, + {"(*Scanner).Init", Method, 0}, + {"(*Scanner).IsValid", Method, 0}, + {"(*Scanner).Next", Method, 0}, + {"(*Scanner).Peek", Method, 0}, + {"(*Scanner).Pos", Method, 0}, + {"(*Scanner).Scan", Method, 0}, + {"(*Scanner).TokenText", Method, 0}, + {"(Position).String", Method, 0}, + {"(Scanner).String", Method, 0}, + {"Char", Const, 0}, + {"Comment", Const, 0}, + {"EOF", Const, 0}, + {"Float", Const, 0}, + {"GoTokens", Const, 0}, + {"GoWhitespace", Const, 0}, + {"Ident", Const, 0}, + {"Int", Const, 0}, + {"Position", Type, 0}, + {"Position.Column", Field, 0}, + {"Position.Filename", Field, 0}, + {"Position.Line", Field, 0}, + {"Position.Offset", Field, 0}, + {"RawString", Const, 0}, + {"ScanChars", Const, 0}, + {"ScanComments", Const, 0}, + {"ScanFloats", Const, 0}, + {"ScanIdents", Const, 0}, + {"ScanInts", Const, 0}, + {"ScanRawStrings", Const, 0}, + {"ScanStrings", Const, 0}, + {"Scanner", Type, 0}, + {"Scanner.Error", Field, 0}, + {"Scanner.ErrorCount", Field, 0}, + {"Scanner.IsIdentRune", Field, 4}, + {"Scanner.Mode", Field, 0}, + {"Scanner.Position", Field, 0}, + {"Scanner.Whitespace", Field, 0}, + {"SkipComments", Const, 0}, + {"String", Const, 0}, + {"TokenString", Func, 0}, + }, + "text/tabwriter": { + {"(*Writer).Flush", Method, 0}, + {"(*Writer).Init", Method, 0}, + {"(*Writer).Write", Method, 0}, + {"AlignRight", Const, 0}, + {"Debug", Const, 0}, + {"DiscardEmptyColumns", Const, 0}, + {"Escape", Const, 0}, + {"FilterHTML", Const, 0}, + {"NewWriter", Func, 0}, + {"StripEscape", Const, 0}, + {"TabIndent", Const, 0}, 
+ {"Writer", Type, 0}, + }, + "text/template": { + {"(*Template).AddParseTree", Method, 0}, + {"(*Template).Clone", Method, 0}, + {"(*Template).DefinedTemplates", Method, 5}, + {"(*Template).Delims", Method, 0}, + {"(*Template).Execute", Method, 0}, + {"(*Template).ExecuteTemplate", Method, 0}, + {"(*Template).Funcs", Method, 0}, + {"(*Template).Lookup", Method, 0}, + {"(*Template).Name", Method, 0}, + {"(*Template).New", Method, 0}, + {"(*Template).Option", Method, 5}, + {"(*Template).Parse", Method, 0}, + {"(*Template).ParseFS", Method, 16}, + {"(*Template).ParseFiles", Method, 0}, + {"(*Template).ParseGlob", Method, 0}, + {"(*Template).Templates", Method, 0}, + {"(ExecError).Error", Method, 6}, + {"(ExecError).Unwrap", Method, 13}, + {"(Template).Copy", Method, 2}, + {"(Template).ErrorContext", Method, 1}, + {"ExecError", Type, 6}, + {"ExecError.Err", Field, 6}, + {"ExecError.Name", Field, 6}, + {"FuncMap", Type, 0}, + {"HTMLEscape", Func, 0}, + {"HTMLEscapeString", Func, 0}, + {"HTMLEscaper", Func, 0}, + {"IsTrue", Func, 6}, + {"JSEscape", Func, 0}, + {"JSEscapeString", Func, 0}, + {"JSEscaper", Func, 0}, + {"Must", Func, 0}, + {"New", Func, 0}, + {"ParseFS", Func, 16}, + {"ParseFiles", Func, 0}, + {"ParseGlob", Func, 0}, + {"Template", Type, 0}, + {"Template.Tree", Field, 0}, + {"URLQueryEscaper", Func, 0}, + }, + "text/template/parse": { + {"(*ActionNode).Copy", Method, 0}, + {"(*ActionNode).String", Method, 0}, + {"(*BoolNode).Copy", Method, 0}, + {"(*BoolNode).String", Method, 0}, + {"(*BranchNode).Copy", Method, 4}, + {"(*BranchNode).String", Method, 0}, + {"(*BreakNode).Copy", Method, 18}, + {"(*BreakNode).String", Method, 18}, + {"(*ChainNode).Add", Method, 1}, + {"(*ChainNode).Copy", Method, 1}, + {"(*ChainNode).String", Method, 1}, + {"(*CommandNode).Copy", Method, 0}, + {"(*CommandNode).String", Method, 0}, + {"(*CommentNode).Copy", Method, 16}, + {"(*CommentNode).String", Method, 16}, + {"(*ContinueNode).Copy", Method, 18}, + {"(*ContinueNode).String", Method, 18}, + {"(*DotNode).Copy", Method, 0}, + {"(*DotNode).String", Method, 0}, + {"(*DotNode).Type", Method, 0}, + {"(*FieldNode).Copy", Method, 0}, + {"(*FieldNode).String", Method, 0}, + {"(*IdentifierNode).Copy", Method, 0}, + {"(*IdentifierNode).SetPos", Method, 1}, + {"(*IdentifierNode).SetTree", Method, 4}, + {"(*IdentifierNode).String", Method, 0}, + {"(*IfNode).Copy", Method, 0}, + {"(*IfNode).String", Method, 0}, + {"(*ListNode).Copy", Method, 0}, + {"(*ListNode).CopyList", Method, 0}, + {"(*ListNode).String", Method, 0}, + {"(*NilNode).Copy", Method, 1}, + {"(*NilNode).String", Method, 1}, + {"(*NilNode).Type", Method, 1}, + {"(*NumberNode).Copy", Method, 0}, + {"(*NumberNode).String", Method, 0}, + {"(*PipeNode).Copy", Method, 0}, + {"(*PipeNode).CopyPipe", Method, 0}, + {"(*PipeNode).String", Method, 0}, + {"(*RangeNode).Copy", Method, 0}, + {"(*RangeNode).String", Method, 0}, + {"(*StringNode).Copy", Method, 0}, + {"(*StringNode).String", Method, 0}, + {"(*TemplateNode).Copy", Method, 0}, + {"(*TemplateNode).String", Method, 0}, + {"(*TextNode).Copy", Method, 0}, + {"(*TextNode).String", Method, 0}, + {"(*Tree).Copy", Method, 2}, + {"(*Tree).ErrorContext", Method, 1}, + {"(*Tree).Parse", Method, 0}, + {"(*VariableNode).Copy", Method, 0}, + {"(*VariableNode).String", Method, 0}, + {"(*WithNode).Copy", Method, 0}, + {"(*WithNode).String", Method, 0}, + {"(ActionNode).Position", Method, 1}, + {"(ActionNode).Type", Method, 0}, + {"(BoolNode).Position", Method, 1}, + {"(BoolNode).Type", Method, 0}, + 
{"(BranchNode).Position", Method, 1}, + {"(BranchNode).Type", Method, 0}, + {"(BreakNode).Position", Method, 18}, + {"(BreakNode).Type", Method, 18}, + {"(ChainNode).Position", Method, 1}, + {"(ChainNode).Type", Method, 1}, + {"(CommandNode).Position", Method, 1}, + {"(CommandNode).Type", Method, 0}, + {"(CommentNode).Position", Method, 16}, + {"(CommentNode).Type", Method, 16}, + {"(ContinueNode).Position", Method, 18}, + {"(ContinueNode).Type", Method, 18}, + {"(DotNode).Position", Method, 1}, + {"(FieldNode).Position", Method, 1}, + {"(FieldNode).Type", Method, 0}, + {"(IdentifierNode).Position", Method, 1}, + {"(IdentifierNode).Type", Method, 0}, + {"(IfNode).Position", Method, 1}, + {"(IfNode).Type", Method, 0}, + {"(ListNode).Position", Method, 1}, + {"(ListNode).Type", Method, 0}, + {"(NilNode).Position", Method, 1}, + {"(NodeType).Type", Method, 0}, + {"(NumberNode).Position", Method, 1}, + {"(NumberNode).Type", Method, 0}, + {"(PipeNode).Position", Method, 1}, + {"(PipeNode).Type", Method, 0}, + {"(Pos).Position", Method, 1}, + {"(RangeNode).Position", Method, 1}, + {"(RangeNode).Type", Method, 0}, + {"(StringNode).Position", Method, 1}, + {"(StringNode).Type", Method, 0}, + {"(TemplateNode).Position", Method, 1}, + {"(TemplateNode).Type", Method, 0}, + {"(TextNode).Position", Method, 1}, + {"(TextNode).Type", Method, 0}, + {"(VariableNode).Position", Method, 1}, + {"(VariableNode).Type", Method, 0}, + {"(WithNode).Position", Method, 1}, + {"(WithNode).Type", Method, 0}, + {"ActionNode", Type, 0}, + {"ActionNode.Line", Field, 0}, + {"ActionNode.NodeType", Field, 0}, + {"ActionNode.Pipe", Field, 0}, + {"ActionNode.Pos", Field, 1}, + {"BoolNode", Type, 0}, + {"BoolNode.NodeType", Field, 0}, + {"BoolNode.Pos", Field, 1}, + {"BoolNode.True", Field, 0}, + {"BranchNode", Type, 0}, + {"BranchNode.ElseList", Field, 0}, + {"BranchNode.Line", Field, 0}, + {"BranchNode.List", Field, 0}, + {"BranchNode.NodeType", Field, 0}, + {"BranchNode.Pipe", Field, 0}, + {"BranchNode.Pos", Field, 1}, + {"BreakNode", Type, 18}, + {"BreakNode.Line", Field, 18}, + {"BreakNode.NodeType", Field, 18}, + {"BreakNode.Pos", Field, 18}, + {"ChainNode", Type, 1}, + {"ChainNode.Field", Field, 1}, + {"ChainNode.Node", Field, 1}, + {"ChainNode.NodeType", Field, 1}, + {"ChainNode.Pos", Field, 1}, + {"CommandNode", Type, 0}, + {"CommandNode.Args", Field, 0}, + {"CommandNode.NodeType", Field, 0}, + {"CommandNode.Pos", Field, 1}, + {"CommentNode", Type, 16}, + {"CommentNode.NodeType", Field, 16}, + {"CommentNode.Pos", Field, 16}, + {"CommentNode.Text", Field, 16}, + {"ContinueNode", Type, 18}, + {"ContinueNode.Line", Field, 18}, + {"ContinueNode.NodeType", Field, 18}, + {"ContinueNode.Pos", Field, 18}, + {"DotNode", Type, 0}, + {"DotNode.NodeType", Field, 4}, + {"DotNode.Pos", Field, 1}, + {"FieldNode", Type, 0}, + {"FieldNode.Ident", Field, 0}, + {"FieldNode.NodeType", Field, 0}, + {"FieldNode.Pos", Field, 1}, + {"IdentifierNode", Type, 0}, + {"IdentifierNode.Ident", Field, 0}, + {"IdentifierNode.NodeType", Field, 0}, + {"IdentifierNode.Pos", Field, 1}, + {"IfNode", Type, 0}, + {"IfNode.BranchNode", Field, 0}, + {"IsEmptyTree", Func, 0}, + {"ListNode", Type, 0}, + {"ListNode.NodeType", Field, 0}, + {"ListNode.Nodes", Field, 0}, + {"ListNode.Pos", Field, 1}, + {"Mode", Type, 16}, + {"New", Func, 0}, + {"NewIdentifier", Func, 0}, + {"NilNode", Type, 1}, + {"NilNode.NodeType", Field, 4}, + {"NilNode.Pos", Field, 1}, + {"Node", Type, 0}, + {"NodeAction", Const, 0}, + {"NodeBool", Const, 0}, + {"NodeBreak", Const, 18}, + 
{"NodeChain", Const, 1}, + {"NodeCommand", Const, 0}, + {"NodeComment", Const, 16}, + {"NodeContinue", Const, 18}, + {"NodeDot", Const, 0}, + {"NodeField", Const, 0}, + {"NodeIdentifier", Const, 0}, + {"NodeIf", Const, 0}, + {"NodeList", Const, 0}, + {"NodeNil", Const, 1}, + {"NodeNumber", Const, 0}, + {"NodePipe", Const, 0}, + {"NodeRange", Const, 0}, + {"NodeString", Const, 0}, + {"NodeTemplate", Const, 0}, + {"NodeText", Const, 0}, + {"NodeType", Type, 0}, + {"NodeVariable", Const, 0}, + {"NodeWith", Const, 0}, + {"NumberNode", Type, 0}, + {"NumberNode.Complex128", Field, 0}, + {"NumberNode.Float64", Field, 0}, + {"NumberNode.Int64", Field, 0}, + {"NumberNode.IsComplex", Field, 0}, + {"NumberNode.IsFloat", Field, 0}, + {"NumberNode.IsInt", Field, 0}, + {"NumberNode.IsUint", Field, 0}, + {"NumberNode.NodeType", Field, 0}, + {"NumberNode.Pos", Field, 1}, + {"NumberNode.Text", Field, 0}, + {"NumberNode.Uint64", Field, 0}, + {"Parse", Func, 0}, + {"ParseComments", Const, 16}, + {"PipeNode", Type, 0}, + {"PipeNode.Cmds", Field, 0}, + {"PipeNode.Decl", Field, 0}, + {"PipeNode.IsAssign", Field, 11}, + {"PipeNode.Line", Field, 0}, + {"PipeNode.NodeType", Field, 0}, + {"PipeNode.Pos", Field, 1}, + {"Pos", Type, 1}, + {"RangeNode", Type, 0}, + {"RangeNode.BranchNode", Field, 0}, + {"SkipFuncCheck", Const, 17}, + {"StringNode", Type, 0}, + {"StringNode.NodeType", Field, 0}, + {"StringNode.Pos", Field, 1}, + {"StringNode.Quoted", Field, 0}, + {"StringNode.Text", Field, 0}, + {"TemplateNode", Type, 0}, + {"TemplateNode.Line", Field, 0}, + {"TemplateNode.Name", Field, 0}, + {"TemplateNode.NodeType", Field, 0}, + {"TemplateNode.Pipe", Field, 0}, + {"TemplateNode.Pos", Field, 1}, + {"TextNode", Type, 0}, + {"TextNode.NodeType", Field, 0}, + {"TextNode.Pos", Field, 1}, + {"TextNode.Text", Field, 0}, + {"Tree", Type, 0}, + {"Tree.Mode", Field, 16}, + {"Tree.Name", Field, 0}, + {"Tree.ParseName", Field, 1}, + {"Tree.Root", Field, 0}, + {"VariableNode", Type, 0}, + {"VariableNode.Ident", Field, 0}, + {"VariableNode.NodeType", Field, 0}, + {"VariableNode.Pos", Field, 1}, + {"WithNode", Type, 0}, + {"WithNode.BranchNode", Field, 0}, + }, + "time": { + {"(*Location).String", Method, 0}, + {"(*ParseError).Error", Method, 0}, + {"(*Ticker).Reset", Method, 15}, + {"(*Ticker).Stop", Method, 0}, + {"(*Time).GobDecode", Method, 0}, + {"(*Time).UnmarshalBinary", Method, 2}, + {"(*Time).UnmarshalJSON", Method, 0}, + {"(*Time).UnmarshalText", Method, 2}, + {"(*Timer).Reset", Method, 1}, + {"(*Timer).Stop", Method, 0}, + {"(Duration).Abs", Method, 19}, + {"(Duration).Hours", Method, 0}, + {"(Duration).Microseconds", Method, 13}, + {"(Duration).Milliseconds", Method, 13}, + {"(Duration).Minutes", Method, 0}, + {"(Duration).Nanoseconds", Method, 0}, + {"(Duration).Round", Method, 9}, + {"(Duration).Seconds", Method, 0}, + {"(Duration).String", Method, 0}, + {"(Duration).Truncate", Method, 9}, + {"(Month).String", Method, 0}, + {"(Time).Add", Method, 0}, + {"(Time).AddDate", Method, 0}, + {"(Time).After", Method, 0}, + {"(Time).AppendFormat", Method, 5}, + {"(Time).Before", Method, 0}, + {"(Time).Clock", Method, 0}, + {"(Time).Compare", Method, 20}, + {"(Time).Date", Method, 0}, + {"(Time).Day", Method, 0}, + {"(Time).Equal", Method, 0}, + {"(Time).Format", Method, 0}, + {"(Time).GoString", Method, 17}, + {"(Time).GobEncode", Method, 0}, + {"(Time).Hour", Method, 0}, + {"(Time).ISOWeek", Method, 0}, + {"(Time).In", Method, 0}, + {"(Time).IsDST", Method, 17}, + {"(Time).IsZero", Method, 0}, + {"(Time).Local", Method, 0}, + 
{"(Time).Location", Method, 0}, + {"(Time).MarshalBinary", Method, 2}, + {"(Time).MarshalJSON", Method, 0}, + {"(Time).MarshalText", Method, 2}, + {"(Time).Minute", Method, 0}, + {"(Time).Month", Method, 0}, + {"(Time).Nanosecond", Method, 0}, + {"(Time).Round", Method, 1}, + {"(Time).Second", Method, 0}, + {"(Time).String", Method, 0}, + {"(Time).Sub", Method, 0}, + {"(Time).Truncate", Method, 1}, + {"(Time).UTC", Method, 0}, + {"(Time).Unix", Method, 0}, + {"(Time).UnixMicro", Method, 17}, + {"(Time).UnixMilli", Method, 17}, + {"(Time).UnixNano", Method, 0}, + {"(Time).Weekday", Method, 0}, + {"(Time).Year", Method, 0}, + {"(Time).YearDay", Method, 1}, + {"(Time).Zone", Method, 0}, + {"(Time).ZoneBounds", Method, 19}, + {"(Weekday).String", Method, 0}, + {"ANSIC", Const, 0}, + {"After", Func, 0}, + {"AfterFunc", Func, 0}, + {"April", Const, 0}, + {"August", Const, 0}, + {"Date", Func, 0}, + {"DateOnly", Const, 20}, + {"DateTime", Const, 20}, + {"December", Const, 0}, + {"Duration", Type, 0}, + {"February", Const, 0}, + {"FixedZone", Func, 0}, + {"Friday", Const, 0}, + {"Hour", Const, 0}, + {"January", Const, 0}, + {"July", Const, 0}, + {"June", Const, 0}, + {"Kitchen", Const, 0}, + {"Layout", Const, 17}, + {"LoadLocation", Func, 0}, + {"LoadLocationFromTZData", Func, 10}, + {"Local", Var, 0}, + {"Location", Type, 0}, + {"March", Const, 0}, + {"May", Const, 0}, + {"Microsecond", Const, 0}, + {"Millisecond", Const, 0}, + {"Minute", Const, 0}, + {"Monday", Const, 0}, + {"Month", Type, 0}, + {"Nanosecond", Const, 0}, + {"NewTicker", Func, 0}, + {"NewTimer", Func, 0}, + {"November", Const, 0}, + {"Now", Func, 0}, + {"October", Const, 0}, + {"Parse", Func, 0}, + {"ParseDuration", Func, 0}, + {"ParseError", Type, 0}, + {"ParseError.Layout", Field, 0}, + {"ParseError.LayoutElem", Field, 0}, + {"ParseError.Message", Field, 0}, + {"ParseError.Value", Field, 0}, + {"ParseError.ValueElem", Field, 0}, + {"ParseInLocation", Func, 1}, + {"RFC1123", Const, 0}, + {"RFC1123Z", Const, 0}, + {"RFC3339", Const, 0}, + {"RFC3339Nano", Const, 0}, + {"RFC822", Const, 0}, + {"RFC822Z", Const, 0}, + {"RFC850", Const, 0}, + {"RubyDate", Const, 0}, + {"Saturday", Const, 0}, + {"Second", Const, 0}, + {"September", Const, 0}, + {"Since", Func, 0}, + {"Sleep", Func, 0}, + {"Stamp", Const, 0}, + {"StampMicro", Const, 0}, + {"StampMilli", Const, 0}, + {"StampNano", Const, 0}, + {"Sunday", Const, 0}, + {"Thursday", Const, 0}, + {"Tick", Func, 0}, + {"Ticker", Type, 0}, + {"Ticker.C", Field, 0}, + {"Time", Type, 0}, + {"TimeOnly", Const, 20}, + {"Timer", Type, 0}, + {"Timer.C", Field, 0}, + {"Tuesday", Const, 0}, + {"UTC", Var, 0}, + {"Unix", Func, 0}, + {"UnixDate", Const, 0}, + {"UnixMicro", Func, 17}, + {"UnixMilli", Func, 17}, + {"Until", Func, 8}, + {"Wednesday", Const, 0}, + {"Weekday", Type, 0}, + }, + "unicode": { + {"(SpecialCase).ToLower", Method, 0}, + {"(SpecialCase).ToTitle", Method, 0}, + {"(SpecialCase).ToUpper", Method, 0}, + {"ASCII_Hex_Digit", Var, 0}, + {"Adlam", Var, 7}, + {"Ahom", Var, 5}, + {"Anatolian_Hieroglyphs", Var, 5}, + {"Arabic", Var, 0}, + {"Armenian", Var, 0}, + {"Avestan", Var, 0}, + {"AzeriCase", Var, 0}, + {"Balinese", Var, 0}, + {"Bamum", Var, 0}, + {"Bassa_Vah", Var, 4}, + {"Batak", Var, 0}, + {"Bengali", Var, 0}, + {"Bhaiksuki", Var, 7}, + {"Bidi_Control", Var, 0}, + {"Bopomofo", Var, 0}, + {"Brahmi", Var, 0}, + {"Braille", Var, 0}, + {"Buginese", Var, 0}, + {"Buhid", Var, 0}, + {"C", Var, 0}, + {"Canadian_Aboriginal", Var, 0}, + {"Carian", Var, 0}, + {"CaseRange", Type, 0}, + 
{"CaseRange.Delta", Field, 0}, + {"CaseRange.Hi", Field, 0}, + {"CaseRange.Lo", Field, 0}, + {"CaseRanges", Var, 0}, + {"Categories", Var, 0}, + {"Caucasian_Albanian", Var, 4}, + {"Cc", Var, 0}, + {"Cf", Var, 0}, + {"Chakma", Var, 1}, + {"Cham", Var, 0}, + {"Cherokee", Var, 0}, + {"Chorasmian", Var, 16}, + {"Co", Var, 0}, + {"Common", Var, 0}, + {"Coptic", Var, 0}, + {"Cs", Var, 0}, + {"Cuneiform", Var, 0}, + {"Cypriot", Var, 0}, + {"Cypro_Minoan", Var, 21}, + {"Cyrillic", Var, 0}, + {"Dash", Var, 0}, + {"Deprecated", Var, 0}, + {"Deseret", Var, 0}, + {"Devanagari", Var, 0}, + {"Diacritic", Var, 0}, + {"Digit", Var, 0}, + {"Dives_Akuru", Var, 16}, + {"Dogra", Var, 13}, + {"Duployan", Var, 4}, + {"Egyptian_Hieroglyphs", Var, 0}, + {"Elbasan", Var, 4}, + {"Elymaic", Var, 14}, + {"Ethiopic", Var, 0}, + {"Extender", Var, 0}, + {"FoldCategory", Var, 0}, + {"FoldScript", Var, 0}, + {"Georgian", Var, 0}, + {"Glagolitic", Var, 0}, + {"Gothic", Var, 0}, + {"Grantha", Var, 4}, + {"GraphicRanges", Var, 0}, + {"Greek", Var, 0}, + {"Gujarati", Var, 0}, + {"Gunjala_Gondi", Var, 13}, + {"Gurmukhi", Var, 0}, + {"Han", Var, 0}, + {"Hangul", Var, 0}, + {"Hanifi_Rohingya", Var, 13}, + {"Hanunoo", Var, 0}, + {"Hatran", Var, 5}, + {"Hebrew", Var, 0}, + {"Hex_Digit", Var, 0}, + {"Hiragana", Var, 0}, + {"Hyphen", Var, 0}, + {"IDS_Binary_Operator", Var, 0}, + {"IDS_Trinary_Operator", Var, 0}, + {"Ideographic", Var, 0}, + {"Imperial_Aramaic", Var, 0}, + {"In", Func, 2}, + {"Inherited", Var, 0}, + {"Inscriptional_Pahlavi", Var, 0}, + {"Inscriptional_Parthian", Var, 0}, + {"Is", Func, 0}, + {"IsControl", Func, 0}, + {"IsDigit", Func, 0}, + {"IsGraphic", Func, 0}, + {"IsLetter", Func, 0}, + {"IsLower", Func, 0}, + {"IsMark", Func, 0}, + {"IsNumber", Func, 0}, + {"IsOneOf", Func, 0}, + {"IsPrint", Func, 0}, + {"IsPunct", Func, 0}, + {"IsSpace", Func, 0}, + {"IsSymbol", Func, 0}, + {"IsTitle", Func, 0}, + {"IsUpper", Func, 0}, + {"Javanese", Var, 0}, + {"Join_Control", Var, 0}, + {"Kaithi", Var, 0}, + {"Kannada", Var, 0}, + {"Katakana", Var, 0}, + {"Kawi", Var, 21}, + {"Kayah_Li", Var, 0}, + {"Kharoshthi", Var, 0}, + {"Khitan_Small_Script", Var, 16}, + {"Khmer", Var, 0}, + {"Khojki", Var, 4}, + {"Khudawadi", Var, 4}, + {"L", Var, 0}, + {"Lao", Var, 0}, + {"Latin", Var, 0}, + {"Lepcha", Var, 0}, + {"Letter", Var, 0}, + {"Limbu", Var, 0}, + {"Linear_A", Var, 4}, + {"Linear_B", Var, 0}, + {"Lisu", Var, 0}, + {"Ll", Var, 0}, + {"Lm", Var, 0}, + {"Lo", Var, 0}, + {"Logical_Order_Exception", Var, 0}, + {"Lower", Var, 0}, + {"LowerCase", Const, 0}, + {"Lt", Var, 0}, + {"Lu", Var, 0}, + {"Lycian", Var, 0}, + {"Lydian", Var, 0}, + {"M", Var, 0}, + {"Mahajani", Var, 4}, + {"Makasar", Var, 13}, + {"Malayalam", Var, 0}, + {"Mandaic", Var, 0}, + {"Manichaean", Var, 4}, + {"Marchen", Var, 7}, + {"Mark", Var, 0}, + {"Masaram_Gondi", Var, 10}, + {"MaxASCII", Const, 0}, + {"MaxCase", Const, 0}, + {"MaxLatin1", Const, 0}, + {"MaxRune", Const, 0}, + {"Mc", Var, 0}, + {"Me", Var, 0}, + {"Medefaidrin", Var, 13}, + {"Meetei_Mayek", Var, 0}, + {"Mende_Kikakui", Var, 4}, + {"Meroitic_Cursive", Var, 1}, + {"Meroitic_Hieroglyphs", Var, 1}, + {"Miao", Var, 1}, + {"Mn", Var, 0}, + {"Modi", Var, 4}, + {"Mongolian", Var, 0}, + {"Mro", Var, 4}, + {"Multani", Var, 5}, + {"Myanmar", Var, 0}, + {"N", Var, 0}, + {"Nabataean", Var, 4}, + {"Nag_Mundari", Var, 21}, + {"Nandinagari", Var, 14}, + {"Nd", Var, 0}, + {"New_Tai_Lue", Var, 0}, + {"Newa", Var, 7}, + {"Nko", Var, 0}, + {"Nl", Var, 0}, + {"No", Var, 0}, + {"Noncharacter_Code_Point", Var, 0}, + 
{"Number", Var, 0}, + {"Nushu", Var, 10}, + {"Nyiakeng_Puachue_Hmong", Var, 14}, + {"Ogham", Var, 0}, + {"Ol_Chiki", Var, 0}, + {"Old_Hungarian", Var, 5}, + {"Old_Italic", Var, 0}, + {"Old_North_Arabian", Var, 4}, + {"Old_Permic", Var, 4}, + {"Old_Persian", Var, 0}, + {"Old_Sogdian", Var, 13}, + {"Old_South_Arabian", Var, 0}, + {"Old_Turkic", Var, 0}, + {"Old_Uyghur", Var, 21}, + {"Oriya", Var, 0}, + {"Osage", Var, 7}, + {"Osmanya", Var, 0}, + {"Other", Var, 0}, + {"Other_Alphabetic", Var, 0}, + {"Other_Default_Ignorable_Code_Point", Var, 0}, + {"Other_Grapheme_Extend", Var, 0}, + {"Other_ID_Continue", Var, 0}, + {"Other_ID_Start", Var, 0}, + {"Other_Lowercase", Var, 0}, + {"Other_Math", Var, 0}, + {"Other_Uppercase", Var, 0}, + {"P", Var, 0}, + {"Pahawh_Hmong", Var, 4}, + {"Palmyrene", Var, 4}, + {"Pattern_Syntax", Var, 0}, + {"Pattern_White_Space", Var, 0}, + {"Pau_Cin_Hau", Var, 4}, + {"Pc", Var, 0}, + {"Pd", Var, 0}, + {"Pe", Var, 0}, + {"Pf", Var, 0}, + {"Phags_Pa", Var, 0}, + {"Phoenician", Var, 0}, + {"Pi", Var, 0}, + {"Po", Var, 0}, + {"Prepended_Concatenation_Mark", Var, 7}, + {"PrintRanges", Var, 0}, + {"Properties", Var, 0}, + {"Ps", Var, 0}, + {"Psalter_Pahlavi", Var, 4}, + {"Punct", Var, 0}, + {"Quotation_Mark", Var, 0}, + {"Radical", Var, 0}, + {"Range16", Type, 0}, + {"Range16.Hi", Field, 0}, + {"Range16.Lo", Field, 0}, + {"Range16.Stride", Field, 0}, + {"Range32", Type, 0}, + {"Range32.Hi", Field, 0}, + {"Range32.Lo", Field, 0}, + {"Range32.Stride", Field, 0}, + {"RangeTable", Type, 0}, + {"RangeTable.LatinOffset", Field, 1}, + {"RangeTable.R16", Field, 0}, + {"RangeTable.R32", Field, 0}, + {"Regional_Indicator", Var, 10}, + {"Rejang", Var, 0}, + {"ReplacementChar", Const, 0}, + {"Runic", Var, 0}, + {"S", Var, 0}, + {"STerm", Var, 0}, + {"Samaritan", Var, 0}, + {"Saurashtra", Var, 0}, + {"Sc", Var, 0}, + {"Scripts", Var, 0}, + {"Sentence_Terminal", Var, 7}, + {"Sharada", Var, 1}, + {"Shavian", Var, 0}, + {"Siddham", Var, 4}, + {"SignWriting", Var, 5}, + {"SimpleFold", Func, 0}, + {"Sinhala", Var, 0}, + {"Sk", Var, 0}, + {"Sm", Var, 0}, + {"So", Var, 0}, + {"Soft_Dotted", Var, 0}, + {"Sogdian", Var, 13}, + {"Sora_Sompeng", Var, 1}, + {"Soyombo", Var, 10}, + {"Space", Var, 0}, + {"SpecialCase", Type, 0}, + {"Sundanese", Var, 0}, + {"Syloti_Nagri", Var, 0}, + {"Symbol", Var, 0}, + {"Syriac", Var, 0}, + {"Tagalog", Var, 0}, + {"Tagbanwa", Var, 0}, + {"Tai_Le", Var, 0}, + {"Tai_Tham", Var, 0}, + {"Tai_Viet", Var, 0}, + {"Takri", Var, 1}, + {"Tamil", Var, 0}, + {"Tangsa", Var, 21}, + {"Tangut", Var, 7}, + {"Telugu", Var, 0}, + {"Terminal_Punctuation", Var, 0}, + {"Thaana", Var, 0}, + {"Thai", Var, 0}, + {"Tibetan", Var, 0}, + {"Tifinagh", Var, 0}, + {"Tirhuta", Var, 4}, + {"Title", Var, 0}, + {"TitleCase", Const, 0}, + {"To", Func, 0}, + {"ToLower", Func, 0}, + {"ToTitle", Func, 0}, + {"ToUpper", Func, 0}, + {"Toto", Var, 21}, + {"TurkishCase", Var, 0}, + {"Ugaritic", Var, 0}, + {"Unified_Ideograph", Var, 0}, + {"Upper", Var, 0}, + {"UpperCase", Const, 0}, + {"UpperLower", Const, 0}, + {"Vai", Var, 0}, + {"Variation_Selector", Var, 0}, + {"Version", Const, 0}, + {"Vithkuqi", Var, 21}, + {"Wancho", Var, 14}, + {"Warang_Citi", Var, 4}, + {"White_Space", Var, 0}, + {"Yezidi", Var, 16}, + {"Yi", Var, 0}, + {"Z", Var, 0}, + {"Zanabazar_Square", Var, 10}, + {"Zl", Var, 0}, + {"Zp", Var, 0}, + {"Zs", Var, 0}, + }, + "unicode/utf16": { + {"AppendRune", Func, 20}, + {"Decode", Func, 0}, + {"DecodeRune", Func, 0}, + {"Encode", Func, 0}, + {"EncodeRune", Func, 0}, + {"IsSurrogate", Func, 
0},
+	},
+	"unicode/utf8": {
+		{"AppendRune", Func, 18},
+		{"DecodeLastRune", Func, 0},
+		{"DecodeLastRuneInString", Func, 0},
+		{"DecodeRune", Func, 0},
+		{"DecodeRuneInString", Func, 0},
+		{"EncodeRune", Func, 0},
+		{"FullRune", Func, 0},
+		{"FullRuneInString", Func, 0},
+		{"MaxRune", Const, 0},
+		{"RuneCount", Func, 0},
+		{"RuneCountInString", Func, 0},
+		{"RuneError", Const, 0},
+		{"RuneLen", Func, 0},
+		{"RuneSelf", Const, 0},
+		{"RuneStart", Func, 0},
+		{"UTFMax", Const, 0},
+		{"Valid", Func, 0},
+		{"ValidRune", Func, 1},
+		{"ValidString", Func, 0},
+	},
+	"unsafe": {
+		{"Add", Func, 0},
+		{"Alignof", Func, 0},
+		{"Offsetof", Func, 0},
+		{"Pointer", Type, 0},
+		{"Sizeof", Func, 0},
+		{"Slice", Func, 0},
+		{"SliceData", Func, 0},
+		{"String", Func, 0},
+		{"StringData", Func, 0},
+	},
+}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
new file mode 100644
index 000000000000..98904017f2ca
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -0,0 +1,97 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run generate.go
+
+// Package stdlib provides a table of all exported symbols in the
+// standard library, along with the version at which they first
+// appeared.
+package stdlib
+
+import (
+	"fmt"
+	"strings"
+)
+
+type Symbol struct {
+	Name    string
+	Kind    Kind
+	Version Version // Go version that first included the symbol
+}
+
+// A Kind indicates the kind of a symbol:
+// function, variable, constant, type, and so on.
+type Kind int8
+
+const (
+	Invalid Kind = iota // Example name:
+	Type                // "Buffer"
+	Func                // "Println"
+	Var                 // "EOF"
+	Const               // "Pi"
+	Field               // "Point.X"
+	Method              // "(*Buffer).Grow"
+)
+
+func (kind Kind) String() string {
+	return [...]string{
+		Invalid: "invalid",
+		Type:    "type",
+		Func:    "func",
+		Var:     "var",
+		Const:   "const",
+		Field:   "field",
+		Method:  "method",
+	}[kind]
+}
+
+// A Version represents a version of Go of the form "go1.%d".
+type Version int8
+
+// String returns a version string of the form "go1.23", without allocating.
+func (v Version) String() string { return versions[v] }
+
+var versions [30]string // (increase constant as needed)
+
+func init() {
+	for i := range versions {
+		versions[i] = fmt.Sprintf("go1.%d", i)
+	}
+}
+
+// HasPackage reports whether the specified package path is part of
+// the standard library's public API.
+func HasPackage(path string) bool {
+	_, ok := PackageSymbols[path]
+	return ok
+}
+
+// SplitField splits the field symbol name into type and field
+// components. It must be called only on Field symbols.
+//
+// Example: "File.Package" -> ("File", "Package")
+func (sym *Symbol) SplitField() (typename, name string) {
+	if sym.Kind != Field {
+		panic("not a field")
+	}
+	typename, name, _ = strings.Cut(sym.Name, ".")
+	return
+}
+
+// SplitMethod splits the method symbol name into pointer, receiver,
+// and method components. It must be called only on Method symbols.
+//
+// Example: "(*Buffer).Grow" -> (true, "Buffer", "Grow")
+func (sym *Symbol) SplitMethod() (ptr bool, recv, name string) {
+	if sym.Kind != Method {
+		panic("not a method")
+	}
+	recv, name, _ = strings.Cut(sym.Name, ".")
+	recv = recv[len("(") : len(recv)-len(")")]
+	ptr = recv[0] == '*'
+	if ptr {
+		recv = recv[len("*"):]
+	}
+	return
+}
diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
index 7e638ec24fcb..ff9437a36cd6 100644
--- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
+++ b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go
@@ -34,30 +34,16 @@ func GetLines(file *token.File) []int {
 		lines []int
 		_     []struct{}
 	}
-	type tokenFile118 struct {
-		_ *token.FileSet // deleted in go1.19
-		tokenFile119
-	}
-
-	type uP = unsafe.Pointer
-	switch unsafe.Sizeof(*file) {
-	case unsafe.Sizeof(tokenFile118{}):
-		var ptr *tokenFile118
-		*(*uP)(uP(&ptr)) = uP(file)
-		ptr.mu.Lock()
-		defer ptr.mu.Unlock()
-		return ptr.lines
-	case unsafe.Sizeof(tokenFile119{}):
-		var ptr *tokenFile119
-		*(*uP)(uP(&ptr)) = uP(file)
-		ptr.mu.Lock()
-		defer ptr.mu.Unlock()
-		return ptr.lines
-
-	default:
+	if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) {
 		panic("unexpected token.File size")
 	}
+	var ptr *tokenFile119
+	type uP = unsafe.Pointer
+	*(*uP)(uP(&ptr)) = uP(file)
+	ptr.mu.Lock()
+	defer ptr.mu.Unlock()
+	return ptr.lines
 }
 
 // AddExistingFiles adds the specified files to the FileSet if they
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
deleted file mode 100644
index cdab9885314f..000000000000
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeparams contains common utilities for writing tools that interact
-// with generic Go code, as introduced with Go 1.18.
-//
-// Many of the types and functions in this package are proxies for the new APIs
-// introduced in the standard library with Go 1.18. For example, the
-// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
-// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
-// versions older than 1.18 these helpers are implemented as stubs, allowing
-// users of this package to write code that handles generic constructs inline,
-// even if the Go version being used to compile does not support generics.
-//
-// Additionally, this package contains common utilities for working with the
-// new generic constructs, to supplement the standard library APIs. Notably,
-// the StructuralTerms API computes a minimal representation of the structural
-// restrictions on a type parameter.
-//
-// An external version of these APIs is available in the
-// golang.org/x/exp/typeparams module.
-package typeparams
-
-import (
-	"fmt"
-	"go/ast"
-	"go/token"
-	"go/types"
-)
-
-// UnpackIndexExpr extracts data from AST nodes that represent index
-// expressions.
-//
-// For an ast.IndexExpr, the resulting indices slice will contain exactly one
-// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
-// number of index expressions.
-//
-// For nodes that don't represent index expressions, the first return value of
-// UnpackIndexExpr will be nil.
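The comment above fully specifies the normalization this deleted helper performed. Since internal/typeparams cannot be imported from outside x/tools, the following self-contained sketch (all names hypothetical) reproduces the same idea ahead of the deleted implementation below, so the behavior can be tried against a parsed expression on go1.18+:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

// unpackIndexExpr mirrors what the deleted helper did: it normalizes
// *ast.IndexExpr (one index, e.g. a[i]) and *ast.IndexListExpr
// (go1.18+, e.g. Pair[K, V]) into a single shape.
func unpackIndexExpr(n ast.Node) (x ast.Expr, indices []ast.Expr) {
	switch e := n.(type) {
	case *ast.IndexExpr:
		return e.X, []ast.Expr{e.Index}
	case *ast.IndexListExpr:
		return e.X, e.Indices
	}
	return nil, nil // not an index expression
}

func main() {
	// On go1.18+ a multi-index expression parses as *ast.IndexListExpr.
	expr, err := parser.ParseExpr("Pair[K, V]")
	if err != nil {
		panic(err)
	}
	if x, indices := unpackIndexExpr(expr); x != nil {
		fmt.Printf("base %v with %d type argument(s)\n", x, len(indices))
	}
}
```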
-func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
-	switch e := n.(type) {
-	case *ast.IndexExpr:
-		return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
-	case *ast.IndexListExpr:
-		return e.X, e.Lbrack, e.Indices, e.Rbrack
-	}
-	return nil, token.NoPos, nil, token.NoPos
-}
-
-// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
-// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
-// will panic.
-func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
-	switch len(indices) {
-	case 0:
-		panic("empty indices")
-	case 1:
-		return &ast.IndexExpr{
-			X:      x,
-			Lbrack: lbrack,
-			Index:  indices[0],
-			Rbrack: rbrack,
-		}
-	default:
-		return &ast.IndexListExpr{
-			X:       x,
-			Lbrack:  lbrack,
-			Indices: indices,
-			Rbrack:  rbrack,
-		}
-	}
-}
-
-// IsTypeParam reports whether t is a type parameter.
-func IsTypeParam(t types.Type) bool {
-	_, ok := t.(*types.TypeParam)
-	return ok
-}
-
-// OriginMethod returns the origin method associated with the method fn.
-// For methods on a non-generic receiver base type, this is just
-// fn. However, for methods with a generic receiver, OriginMethod returns the
-// corresponding method in the method set of the origin type.
-//
-// As a special case, if fn is not a method (has no receiver), OriginMethod
-// returns fn.
-func OriginMethod(fn *types.Func) *types.Func {
-	recv := fn.Type().(*types.Signature).Recv()
-	if recv == nil {
-		return fn
-	}
-	base := recv.Type()
-	p, isPtr := base.(*types.Pointer)
-	if isPtr {
-		base = p.Elem()
-	}
-	named, isNamed := base.(*types.Named)
-	if !isNamed {
-		// Receiver is a *types.Interface.
-		return fn
-	}
-	if named.TypeParams().Len() == 0 {
-		// Receiver base has no type parameters, so we can avoid the lookup below.
-		return fn
-	}
-	orig := named.Origin()
-	gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
-
-	// This is a fix for a gopls crash (#60628) due to a go/types bug (#60634). In:
-	//	package p
-	//	type T *int
-	//	func (*T) f() {}
-	// LookupFieldOrMethod(T, true, p, f)=nil, but NewMethodSet(*T)={(*T).f}.
-	// Here we make them consistent by force.
-	// (The go/types bug is general, but this workaround is reached only
-	// for generic T thanks to the early return above.)
-	if gfn == nil {
-		mset := types.NewMethodSet(types.NewPointer(orig))
-		for i := 0; i < mset.Len(); i++ {
-			m := mset.At(i)
-			if m.Obj().Id() == fn.Id() {
-				gfn = m.Obj()
-				break
-			}
-		}
-	}
-
-	// In golang/go#61196, we observe another crash, this time inexplicable.
-	if gfn == nil {
-		panic(fmt.Sprintf("missing origin method for %s.%s; named == origin: %t, named.NumMethods(): %d, origin.NumMethods(): %d", named, fn, named == orig, named.NumMethods(), orig.NumMethods()))
-	}
-
-	return gfn.(*types.Func)
-}
-
-// GenericAssignableTo is a generalization of types.AssignableTo that
-// implements the following rule for uninstantiated generic types:
-//
-// If V and T are generic named types, then V is considered assignable to T if,
-// for every possible instantiation of V[A_1, ..., A_N], the instantiation
-// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
-//
-// If T has structural constraints, they must be satisfied by V.
-//
-// For example, consider the following type declarations:
-//
-//	type Interface[T any] interface {
-//		Accept(T)
-//	}
-//
-//	type Container[T any] struct {
-//		Element T
-//	}
-//
-//	func (c Container[T]) Accept(t T) { c.Element = t }
-//
-// In this case, GenericAssignableTo reports that instantiations of Container
-// are assignable to the corresponding instantiation of Interface.
-func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
-	// If V and T are not both named, or do not have matching non-empty type
-	// parameter lists, fall back on types.AssignableTo.
-
-	VN, Vnamed := V.(*types.Named)
-	TN, Tnamed := T.(*types.Named)
-	if !Vnamed || !Tnamed {
-		return types.AssignableTo(V, T)
-	}
-
-	vtparams := VN.TypeParams()
-	ttparams := TN.TypeParams()
-	if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
-		return types.AssignableTo(V, T)
-	}
-
-	// V and T have the same (non-zero) number of type params. Instantiate both
-	// with the type parameters of V. This must always succeed for V, and will
-	// succeed for T if and only if the type set of each type parameter of V is a
-	// subset of the type set of the corresponding type parameter of T, meaning
-	// that every instantiation of V corresponds to a valid instantiation of T.
-
-	// Minor optimization: ensure we share a context across the two
-	// instantiations below.
-	if ctxt == nil {
-		ctxt = types.NewContext()
-	}
-
-	var targs []types.Type
-	for i := 0; i < vtparams.Len(); i++ {
-		targs = append(targs, vtparams.At(i))
-	}
-
-	vinst, err := types.Instantiate(ctxt, V, targs, true)
-	if err != nil {
-		panic("type parameters should satisfy their own constraints")
-	}
-
-	tinst, err := types.Instantiate(ctxt, T, targs, true)
-	if err != nil {
-		return false
-	}
-
-	return types.AssignableTo(vinst, tinst)
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
deleted file mode 100644
index 7ea8840eab7c..000000000000
--- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeparams
-
-import (
-	"go/types"
-)
-
-// CoreType returns the core type of T or nil if T does not have a core type.
-//
-// See https://go.dev/ref/spec#Core_types for the definition of a core type.
-func CoreType(T types.Type) types.Type {
-	U := T.Underlying()
-	if _, ok := U.(*types.Interface); !ok {
-		return U // for non-interface types,
-	}
-
-	terms, err := _NormalTerms(U)
-	if len(terms) == 0 || err != nil {
-		// len(terms) -> empty type set of interface.
-		// err != nil => U is invalid, exceeds complexity bounds, or has an empty type set.
-		return nil // no core type.
-	}
-
-	U = terms[0].Type().Underlying()
-	var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying())
-	for identical = 1; identical < len(terms); identical++ {
-		if !types.Identical(U, terms[identical].Type().Underlying()) {
-			break
-		}
-	}
-
-	if identical == len(terms) {
-		// https://go.dev/ref/spec#Core_types
-		// "There is a single type U which is the underlying type of all types in the type set of T"
-		return U
-	}
-	ch, ok := U.(*types.Chan)
-	if !ok {
-		return nil // no core type as identical < len(terms) and U is not a channel.
- } - // https://go.dev/ref/spec#Core_types - // "the type chan E if T contains only bidirectional channels, or the type chan<- E or - // <-chan E depending on the direction of the directional channels present." - for chans := identical; chans < len(terms); chans++ { - curr, ok := terms[chans].Type().Underlying().(*types.Chan) - if !ok { - return nil - } - if !types.Identical(ch.Elem(), curr.Elem()) { - return nil // channel elements are not identical. - } - if ch.Dir() == types.SendRecv { - // ch is bidirectional. We can safely always use curr's direction. - ch = curr - } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { - // ch and curr are not bidirectional and not the same direction. - return nil - } - } - return ch -} - -// _NormalTerms returns a slice of terms representing the normalized structural -// type restrictions of a type, if any. -// -// For all types other than *types.TypeParam, *types.Interface, and -// *types.Union, this is just a single term with Tilde() == false and -// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see -// below. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration type -// T[P interface{~int; m()}] int the structural restriction of the type -// parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// _NormalTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, _NormalTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the type is -// invalid, exceeds complexity bounds, or has an empty type set. In the latter -// case, _NormalTerms returns ErrEmptyTypeSet. -// -// _NormalTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func _NormalTerms(typ types.Type) ([]*types.Term, error) { - switch typ := typ.(type) { - case *types.TypeParam: - return StructuralTerms(typ) - case *types.Union: - return UnionTermSet(typ) - case *types.Interface: - return InterfaceTermSet(typ) - default: - return []*types.Term{types.NewTerm(false, typ)}, nil - } -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go deleted file mode 100644 index 93c80fdc96ce..000000000000 --- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2021 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package typeparams - -import ( - "errors" - "fmt" - "go/types" - "os" - "strings" -) - -//go:generate go run copytermlist.go - -const debug = false - -var ErrEmptyTypeSet = errors.New("empty type set") - -// StructuralTerms returns a slice of terms representing the normalized -// structural type restrictions of a type parameter, if any. -// -// Structural type restrictions of a type parameter are created via -// non-interface types embedded in its constraint interface (directly, or via a -// chain of interface embeddings). For example, in the declaration -// -// type T[P interface{~int; m()}] int -// -// the structural restriction of the type parameter P is ~int. -// -// With interface embedding and unions, the specification of structural type -// restrictions may be arbitrarily complex. For example, consider the -// following: -// -// type A interface{ ~string|~[]byte } -// -// type B interface{ int|string } -// -// type C interface { ~string|~int } -// -// type T[P interface{ A|B; C }] int -// -// In this example, the structural type restriction of P is ~string|int: A|B -// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, -// which when intersected with C (~string|~int) yields ~string|int. -// -// StructuralTerms computes these expansions and reductions, producing a -// "normalized" form of the embeddings. A structural restriction is normalized -// if it is a single union containing no interface terms, and is minimal in the -// sense that removing any term changes the set of types satisfying the -// constraint. It is left as a proof for the reader that, modulo sorting, there -// is exactly one such normalized form. -// -// Because the minimal representation always takes this form, StructuralTerms -// returns a slice of tilde terms corresponding to the terms of the union in -// the normalized structural restriction. An error is returned if the -// constraint interface is invalid, exceeds complexity bounds, or has an empty -// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. -// -// StructuralTerms makes no guarantees about the order of terms, except that it -// is deterministic. -func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { - constraint := tparam.Constraint() - if constraint == nil { - return nil, fmt.Errorf("%s has nil constraint", tparam) - } - iface, _ := constraint.Underlying().(*types.Interface) - if iface == nil { - return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) - } - return InterfaceTermSet(iface) -} - -// InterfaceTermSet computes the normalized terms for a constraint interface, -// returning an error if the term set cannot be computed or is empty. In the -// latter case, the error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. -func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { - return computeTermSet(iface) -} - -// UnionTermSet computes the normalized terms for a union, returning an error -// if the term set cannot be computed or is empty. In the latter case, the -// error will be ErrEmptyTypeSet. -// -// See the documentation of StructuralTerms for more information on -// normalization. 
-func UnionTermSet(union *types.Union) ([]*types.Term, error) { - return computeTermSet(union) -} - -func computeTermSet(typ types.Type) ([]*types.Term, error) { - tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) - if err != nil { - return nil, err - } - if tset.terms.isEmpty() { - return nil, ErrEmptyTypeSet - } - if tset.terms.isAll() { - return nil, nil - } - var terms []*types.Term - for _, term := range tset.terms { - terms = append(terms, types.NewTerm(term.tilde, term.typ)) - } - return terms, nil -} - -// A termSet holds the normalized set of terms for a given type. -// -// The name termSet is intentionally distinct from 'type set': a type set is -// all types that implement a type (and includes method restrictions), whereas -// a term set just represents the structural restrictions on a type. -type termSet struct { - complete bool - terms termlist -} - -func indentf(depth int, format string, args ...interface{}) { - fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) -} - -func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { - if t == nil { - panic("nil type") - } - - if debug { - indentf(depth, "%s", t.String()) - defer func() { - if err != nil { - indentf(depth, "=> %s", err) - } else { - indentf(depth, "=> %s", res.terms.String()) - } - }() - } - - const maxTermCount = 100 - if tset, ok := seen[t]; ok { - if !tset.complete { - return nil, fmt.Errorf("cycle detected in the declaration of %s", t) - } - return tset, nil - } - - // Mark the current type as seen to avoid infinite recursion. - tset := new(termSet) - defer func() { - tset.complete = true - }() - seen[t] = tset - - switch u := t.Underlying().(type) { - case *types.Interface: - // The term set of an interface is the intersection of the term sets of its - // embedded types. - tset.terms = allTermlist - for i := 0; i < u.NumEmbeddeds(); i++ { - embedded := u.EmbeddedType(i) - if _, ok := embedded.Underlying().(*types.TypeParam); ok { - return nil, fmt.Errorf("invalid embedded type %T", embedded) - } - tset2, err := computeTermSetInternal(embedded, seen, depth+1) - if err != nil { - return nil, err - } - tset.terms = tset.terms.intersect(tset2.terms) - } - case *types.Union: - // The term set of a union is the union of term sets of its terms. - tset.terms = nil - for i := 0; i < u.Len(); i++ { - t := u.Term(i) - var terms termlist - switch t.Type().Underlying().(type) { - case *types.Interface: - tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) - if err != nil { - return nil, err - } - terms = tset2.terms - case *types.TypeParam, *types.Union: - // A stand-alone type parameter or union is not permitted as union - // term. - return nil, fmt.Errorf("invalid union term %T", t) - default: - if t.Type() == types.Typ[types.Invalid] { - continue - } - terms = termlist{{t.Tilde(), t.Type()}} - } - tset.terms = tset.terms.union(terms) - if len(tset.terms) > maxTermCount { - return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) - } - } - case *types.TypeParam: - panic("unreachable") - default: - // For all other types, the term set is just a single non-tilde term - // holding the type itself. - if u != types.Typ[types.Invalid] { - tset.terms = termlist{{false, t}} - } - } - return tset, nil -} - -// under is a facade for the go/types internal function of the same name. It is -// used by typeterm.go. 
-func under(t types.Type) types.Type {
- return t.Underlying()
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
deleted file mode 100644
index cbd12f801314..000000000000
--- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by copytermlist.go DO NOT EDIT.
-
-package typeparams
-
-import (
- "bytes"
- "go/types"
-)
-
-// A termlist represents the type set represented by the union
-// t1 āˆŖ y2 āˆŖ ... tn of the type sets of the terms t1 to tn.
-// A termlist is in normal form if all terms are disjoint.
-// termlist operations don't require the operands to be in
-// normal form.
-type termlist []*term
-
-// allTermlist represents the set of all types.
-// It is in normal form.
-var allTermlist = termlist{new(term)}
-
-// String prints the termlist exactly (without normalization).
-func (xl termlist) String() string {
- if len(xl) == 0 {
- return "āˆ…"
- }
- var buf bytes.Buffer
- for i, x := range xl {
- if i > 0 {
- buf.WriteString(" | ")
- }
- buf.WriteString(x.String())
- }
- return buf.String()
-}
-
-// isEmpty reports whether the termlist xl represents the empty set of types.
-func (xl termlist) isEmpty() bool {
- // If there's a non-nil term, the entire list is not empty.
- // If the termlist is in normal form, this requires at most
- // one iteration.
- for _, x := range xl {
- if x != nil {
- return false
- }
- }
- return true
-}
-
-// isAll reports whether the termlist xl represents the set of all types.
-func (xl termlist) isAll() bool {
- // If there's a š“¤ term, the entire list is š“¤.
- // If the termlist is in normal form, this requires at most
- // one iteration.
- for _, x := range xl {
- if x != nil && x.typ == nil {
- return true
- }
- }
- return false
-}
-
-// norm returns the normal form of xl.
-func (xl termlist) norm() termlist {
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix asymptotic performance
- used := make([]bool, len(xl))
- var rl termlist
- for i, xi := range xl {
- if xi == nil || used[i] {
- continue
- }
- for j := i + 1; j < len(xl); j++ {
- xj := xl[j]
- if xj == nil || used[j] {
- continue
- }
- if u1, u2 := xi.union(xj); u2 == nil {
- // If we encounter a š“¤ term, the entire list is š“¤.
- // Exit early.
- // (Note that this is not just an optimization;
- // if we continue, we may end up with a š“¤ term
- // and other terms and the result would not be
- // in normal form.)
- if u1.typ == nil {
- return allTermlist
- }
- xi = u1
- used[j] = true // xj is now unioned into xi - ignore it in future iterations
- }
- }
- rl = append(rl, xi)
- }
- return rl
-}
-
-// union returns the union xl āˆŖ yl.
-func (xl termlist) union(yl termlist) termlist {
- return append(xl, yl...).norm()
-}
-
-// intersect returns the intersection xl āˆ© yl.
-func (xl termlist) intersect(yl termlist) termlist {
- if xl.isEmpty() || yl.isEmpty() {
- return nil
- }
-
- // Quadratic algorithm, but good enough for now.
- // TODO(gri) fix asymptotic performance
- var rl termlist
- for _, x := range xl {
- for _, y := range yl {
- if r := x.intersect(y); r != nil {
- rl = append(rl, r)
- }
- }
- }
- return rl.norm()
-}
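The norm method above merges overlapping terms pairwise until every remaining term is disjoint. A minimal standalone sketch of that merge rule (an illustrative toy with string stand-ins for types.Type, not the internal implementation being deleted here):

package main

import "fmt"

// toyTerm mirrors the deleted term type: tilde=true means "all types whose
// underlying type is typ"; tilde=false means exactly typ.
type toyTerm struct {
	tilde bool
	typ   string
}

// merge returns the union of two terms over the same type: the tilde form
// absorbs the exact form (~t āˆŖ t == ~t), matching the union table in this file.
func merge(x, y toyTerm) (toyTerm, bool) {
	if x.typ != y.typ {
		return toyTerm{}, false // disjoint: keep both terms
	}
	if x.tilde || !y.tilde {
		return x, true
	}
	return y, true
}

func main() {
	list := []toyTerm{{false, "int"}, {true, "int"}, {false, "string"}}
	// Quadratic merging, as in norm(): fold each term into an earlier one if possible.
	var normd []toyTerm
outer:
	for _, t := range list {
		for i, u := range normd {
			if m, ok := merge(u, t); ok {
				normd[i] = m
				continue outer
			}
		}
		normd = append(normd, t)
	}
	fmt.Println(normd) // [{true int} {false string}]
}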
-// equal reports whether xl and yl represent the same type set.
-func (xl termlist) equal(yl termlist) bool {
- // TODO(gri) this should be more efficient
- return xl.subsetOf(yl) && yl.subsetOf(xl)
-}
-
-// includes reports whether t āˆˆ xl.
-func (xl termlist) includes(t types.Type) bool {
- for _, x := range xl {
- if x.includes(t) {
- return true
- }
- }
- return false
-}
-
-// supersetOf reports whether y āŠ† xl.
-func (xl termlist) supersetOf(y *term) bool {
- for _, x := range xl {
- if y.subsetOf(x) {
- return true
- }
- }
- return false
-}
-
-// subsetOf reports whether xl āŠ† yl.
-func (xl termlist) subsetOf(yl termlist) bool {
- if yl.isEmpty() {
- return xl.isEmpty()
- }
-
- // each term x of xl must be a subset of yl
- for _, x := range xl {
- if !yl.supersetOf(x) {
- return false // x is not a subset yl
- }
- }
- return true
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
deleted file mode 100644
index 7350bb702a17..000000000000
--- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Code generated by copytermlist.go DO NOT EDIT.
-
-package typeparams
-
-import "go/types"
-
-// A term describes elementary type sets:
-//
-// āˆ…: (*term)(nil) == āˆ… // set of no types (empty set)
-// š“¤: &term{} == š“¤ // set of all types (š“¤niverse)
-// T: &term{false, T} == {T} // set of type T
-// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
-type term struct {
- tilde bool // valid if typ != nil
- typ types.Type
-}
-
-func (x *term) String() string {
- switch {
- case x == nil:
- return "āˆ…"
- case x.typ == nil:
- return "š“¤"
- case x.tilde:
- return "~" + x.typ.String()
- default:
- return x.typ.String()
- }
-}
-
-// equal reports whether x and y represent the same type set.
-func (x *term) equal(y *term) bool {
- // easy cases
- switch {
- case x == nil || y == nil:
- return x == y
- case x.typ == nil || y.typ == nil:
- return x.typ == y.typ
- }
- // āˆ… āŠ‚ x, y āŠ‚ š“¤
-
- return x.tilde == y.tilde && types.Identical(x.typ, y.typ)
-}
-
-// union returns the union x āˆŖ y: zero, one, or two non-nil terms.
-func (x *term) union(y *term) (_, _ *term) {
- // easy cases
- switch {
- case x == nil && y == nil:
- return nil, nil // āˆ… āˆŖ āˆ… == āˆ…
- case x == nil:
- return y, nil // āˆ… āˆŖ y == y
- case y == nil:
- return x, nil // x āˆŖ āˆ… == x
- case x.typ == nil:
- return x, nil // š“¤ āˆŖ y == š“¤
- case y.typ == nil:
- return y, nil // x āˆŖ š“¤ == š“¤
- }
- // āˆ… āŠ‚ x, y āŠ‚ š“¤
-
- if x.disjoint(y) {
- return x, y // x āˆŖ y == (x, y) if x āˆ© y == āˆ…
- }
- // x.typ == y.typ
-
- // ~t āˆŖ ~t == ~t
- // ~t āˆŖ T == ~t
- // T āˆŖ ~t == ~t
- // T āˆŖ T == T
- if x.tilde || !y.tilde {
- return x, nil
- }
- return y, nil
-}
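For orientation, the exported go/types API surfaces the same tilde/type terms on constraint interfaces. An illustrative, self-contained sketch (assumes Go 1.18+; not part of the patch) that type-checks a small generic declaration and prints the raw union terms the machinery above would normalize:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p
type T[P interface{ ~int | ~string }] struct{}`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	named := pkg.Scope().Lookup("T").Type().(*types.Named)
	tparam := named.TypeParams().At(0)
	// The constraint is an interface whose embedded union carries the terms.
	iface := tparam.Constraint().Underlying().(*types.Interface)
	union := iface.EmbeddedType(0).(*types.Union)
	for i := 0; i < union.Len(); i++ {
		t := union.Term(i)
		fmt.Printf("term %d: tilde=%v type=%s\n", i, t.Tilde(), t.Type())
	}
}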
-// intersect returns the intersection x āˆ© y.
-func (x *term) intersect(y *term) *term {
- // easy cases
- switch {
- case x == nil || y == nil:
- return nil // āˆ… āˆ© y == āˆ… and x āˆ© āˆ… == āˆ…
- case x.typ == nil:
- return y // š“¤ āˆ© y == y
- case y.typ == nil:
- return x // x āˆ© š“¤ == x
- }
- // āˆ… āŠ‚ x, y āŠ‚ š“¤
-
- if x.disjoint(y) {
- return nil // x āˆ© y == āˆ… if x āˆ© y == āˆ…
- }
- // x.typ == y.typ
-
- // ~t āˆ© ~t == ~t
- // ~t āˆ© T == T
- // T āˆ© ~t == T
- // T āˆ© T == T
- if !x.tilde || y.tilde {
- return x
- }
- return y
-}
-
-// includes reports whether t āˆˆ x.
-func (x *term) includes(t types.Type) bool {
- // easy cases
- switch {
- case x == nil:
- return false // t āˆˆ āˆ… == false
- case x.typ == nil:
- return true // t āˆˆ š“¤ == true
- }
- // āˆ… āŠ‚ x āŠ‚ š“¤
-
- u := t
- if x.tilde {
- u = under(u)
- }
- return types.Identical(x.typ, u)
-}
-
-// subsetOf reports whether x āŠ† y.
-func (x *term) subsetOf(y *term) bool {
- // easy cases
- switch {
- case x == nil:
- return true // āˆ… āŠ† y == true
- case y == nil:
- return false // x āŠ† āˆ… == false since x != āˆ…
- case y.typ == nil:
- return true // x āŠ† š“¤ == true
- case x.typ == nil:
- return false // š“¤ āŠ† y == false since y != š“¤
- }
- // āˆ… āŠ‚ x, y āŠ‚ š“¤
-
- if x.disjoint(y) {
- return false // x āŠ† y == false if x āˆ© y == āˆ…
- }
- // x.typ == y.typ
-
- // ~t āŠ† ~t == true
- // ~t āŠ† T == false
- // T āŠ† ~t == true
- // T āŠ† T == true
- return !x.tilde || y.tilde
-}
-
-// disjoint reports whether x āˆ© y == āˆ….
-// x.typ and y.typ must not be nil.
-func (x *term) disjoint(y *term) bool {
- if debug && (x.typ == nil || y.typ == nil) {
- panic("invalid argument(s)")
- }
- ux := x.typ
- if y.tilde {
- ux = under(ux)
- }
- uy := y.typ
- if x.tilde {
- uy = under(uy)
- }
- return !types.Identical(ux, uy)
-}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 07484073a57d..e0c27ed251c9 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -167,7 +167,7 @@ const (
 UntypedNilUse
 
 // WrongAssignCount occurs when the number of values on the right-hand side
- // of an assignment or or initialization expression does not match the number
+ // of an assignment or initialization expression does not match the number
 // of variables on the left-hand side.
 //
 // Example:
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
new file mode 100644
index 000000000000..fea7c8b75e8e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+
+ "golang.org/x/tools/internal/aliases"
+)
+
+// ReceiverNamed returns the named type (if any) associated with the
+// type of recv, which may be of the form N or *N, or aliases thereof.
+// It also reports whether a Pointer was present.
+func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
+ t := recv.Type()
+ if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok {
+ isPtr = true
+ t = ptr.Elem()
+ }
+ named, _ = aliases.Unalias(t).(*types.Named)
+ return
+}
+
+// Unpointer returns T given *T or an alias thereof.
+// For all other types it is the identity function.
+// It does not look at underlying types. +// The result may be an alias. +// +// Use this function to strip off the optional pointer on a receiver +// in a field or method selection, without losing the named type +// (which is needed to compute the method set). +// +// See also [typeparams.MustDeref], which removes one level of +// indirection from the type, regardless of named types (analogous to +// a LOAD instruction). +func Unpointer(t types.Type) types.Type { + if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/toonew.go b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go new file mode 100644 index 000000000000..cc86487eaa0a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/toonew.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + + "golang.org/x/tools/internal/stdlib" + "golang.org/x/tools/internal/versions" +) + +// TooNewStdSymbols computes the set of package-level symbols +// exported by pkg that are not available at the specified version. +// The result maps each symbol to its minimum version. +// +// The pkg is allowed to contain type errors. +func TooNewStdSymbols(pkg *types.Package, version string) map[types.Object]string { + disallowed := make(map[types.Object]string) + + // Pass 1: package-level symbols. + symbols := stdlib.PackageSymbols[pkg.Path()] + for _, sym := range symbols { + symver := sym.Version.String() + if versions.Before(version, symver) { + switch sym.Kind { + case stdlib.Func, stdlib.Var, stdlib.Const, stdlib.Type: + disallowed[pkg.Scope().Lookup(sym.Name)] = symver + } + } + } + + // Pass 2: fields and methods. + // + // We allow fields and methods if their associated type is + // disallowed, as otherwise we would report false positives + // for compatibility shims. Consider: + // + // //go:build go1.22 + // type T struct { F std.Real } // correct new API + // + // //go:build !go1.22 + // type T struct { F fake } // shim + // type fake struct { ... } + // func (fake) M () {} + // + // These alternative declarations of T use either the std.Real + // type, introduced in go1.22, or a fake type, for the field + // F. (The fakery could be arbitrarily deep, involving more + // nested fields and methods than are shown here.) Clients + // that use the compatibility shim T will compile with any + // version of go, whether older or newer than go1.22, but only + // the newer version will use the std.Real implementation. + // + // Now consider a reference to method M in new(T).F.M() in a + // module that requires a minimum of go1.21. The analysis may + // occur using a version of Go higher than 1.21, selecting the + // first version of T, so the method M is Real.M. This would + // spuriously cause the analyzer to report a reference to a + // too-new symbol even though this expression compiles just + // fine (with the fake implementation) using go1.21. 
+ for _, sym := range symbols {
+ symVersion := sym.Version.String()
+ if !versions.Before(version, symVersion) {
+ continue // allowed
+ }
+
+ var obj types.Object
+ switch sym.Kind {
+ case stdlib.Field:
+ typename, name := sym.SplitField()
+ if t := pkg.Scope().Lookup(typename); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), false, pkg, name)
+ }
+
+ case stdlib.Method:
+ ptr, recvname, name := sym.SplitMethod()
+ if t := pkg.Scope().Lookup(recvname); t != nil && disallowed[t] == "" {
+ obj, _, _ = types.LookupFieldOrMethod(t.Type(), ptr, pkg, name)
+ }
+ }
+ if obj != nil {
+ disallowed[obj] = symVersion
+ }
+ }
+
+ return disallowed
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index ce7d4351b220..7c77c2fbc038 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -48,5 +48,3 @@ func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos,
 }
 return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true
 }
-
-var SetGoVersion = func(conf *types.Config, version string) bool { return false }
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go b/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
deleted file mode 100644
index a42b072a67d3..000000000000
--- a/vendor/golang.org/x/tools/internal/typesinternal/types_118.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.18
-// +build go1.18
-
-package typesinternal
-
-import (
- "go/types"
-)
-
-func init() {
- SetGoVersion = func(conf *types.Config, version string) bool {
- conf.GoVersion = version
- return true
- }
-}
diff --git a/vendor/golang.org/x/tools/internal/versions/features.go b/vendor/golang.org/x/tools/internal/versions/features.go
new file mode 100644
index 000000000000..b53f17861613
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/versions/features.go
@@ -0,0 +1,43 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package versions
+
+// This file contains predicates for working with file versions to
+// decide when a tool should consider a language feature enabled.
+
+// GoVersions that features in x/tools can be gated to.
+const (
+ Go1_18 = "go1.18"
+ Go1_19 = "go1.19"
+ Go1_20 = "go1.20"
+ Go1_21 = "go1.21"
+ Go1_22 = "go1.22"
+)
+
+// Future is an invalid unknown Go version sometime in the future.
+// Do not use directly with Compare.
+const Future = ""
+
+// AtLeast reports whether the file version v comes after a Go release.
+//
+// Use this predicate to enable a behavior once a certain Go release
+// has happened (and stays enabled in the future).
+func AtLeast(v, release string) bool {
+ if v == Future {
+ return true // an unknown future version is always after release.
+ }
+ return Compare(Lang(v), Lang(release)) >= 0
+}
+
+// Before reports whether the file version v is strictly before a Go release.
+//
+// Use this predicate to disable a behavior once a certain Go release
+// has happened (and stays disabled in the future).
+func Before(v, release string) bool {
+ if v == Future {
+ return false // an unknown future version happens after release.
+ } + return Compare(Lang(v), Lang(release)) < 0 +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go new file mode 100644 index 000000000000..377bf7a53b4a --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package versions + +// toolchain is maximum version (<1.22) that the go toolchain used +// to build the current tool is known to support. +// +// When a tool is built with >=1.22, the value of toolchain is unused. +// +// x/tools does not support building with go <1.18. So we take this +// as the minimum possible maximum. +var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go new file mode 100644 index 000000000000..f65beed9d832 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.19 +// +build go1.19 + +package versions + +func init() { + if Compare(toolchain, Go1_19) < 0 { + toolchain = Go1_19 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go new file mode 100644 index 000000000000..1a9efa126cdf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.20 +// +build go1.20 + +package versions + +func init() { + if Compare(toolchain, Go1_20) < 0 { + toolchain = Go1_20 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go new file mode 100644 index 000000000000..b7ef216dfecf --- /dev/null +++ b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go @@ -0,0 +1,14 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 +// +build go1.21 + +package versions + +func init() { + if Compare(toolchain, Go1_21) < 0 { + toolchain = Go1_21 + } +} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go index a7b79207aeeb..b4345d3349e5 100644 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ b/vendor/golang.org/x/tools/internal/versions/types_go121.go @@ -12,9 +12,19 @@ import ( "go/types" ) -// FileVersions always reports the a file's Go version as the -// zero version at this Go version. -func FileVersions(info *types.Info, file *ast.File) string { return "" } +// FileVersion returns a language version (<=1.21) derived from runtime.Version() +// or an unknown future version. +func FileVersion(info *types.Info, file *ast.File) string { + // In x/tools built with Go <= 1.21, we do not have Info.FileVersions + // available. We use a go version derived from the toolchain used to + // compile the tool by default. + // This will be <= go1.21. 
We take this as the maximum version that
+ // this tool can support.
+ //
+ // There are no features currently in x/tools that need to tell fine grained
+ // differences for versions <1.22.
+ return toolchain
+}
 
-// InitFileVersions is a noop at this Go version.
+// InitFileVersions is a noop when compiled with this Go version.
 func InitFileVersions(*types.Info) {}
diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go
index 7b9ba89a8220..e8180632a526 100644
--- a/vendor/golang.org/x/tools/internal/versions/types_go122.go
+++ b/vendor/golang.org/x/tools/internal/versions/types_go122.go
@@ -12,10 +12,27 @@ import (
 "go/types"
 )
 
-// FileVersions maps a file to the file's semantic Go version.
-// The reported version is the zero version if a version cannot be determined.
-func FileVersions(info *types.Info, file *ast.File) string {
- return info.FileVersions[file]
+// FileVersion returns a file's Go version.
+// The reported version is an unknown Future version if a
+// version cannot be determined.
+func FileVersion(info *types.Info, file *ast.File) string {
+ // In tools built with Go >= 1.22, the Go version of a file
+ // follows a cascade of sources:
+ // 1) types.Info.FileVersion, which follows the cascade:
+ //   1.a) file version (ast.File.GoVersion),
+ //   1.b) the package version (types.Config.GoVersion), or
+ // 2) is some unknown Future version.
+ //
+ // File versions require a valid package version to be provided to types
+ // in Config.GoVersion. Config.GoVersion is either from the package's module
+ // or the toolchain (go run). This value should be provided by go/packages
+ // or unitchecker.Config.GoVersion.
+ if v := info.FileVersions[file]; IsValid(v) {
+ return v
+ }
+ // Note: we could instead return runtime.Version() [if valid].
+ // This would act as a max version on what a tool can support.
+ return Future
 }
 
 // InitFileVersions initializes info to record Go versions for Go files.
diff --git a/vendor/golang.org/x/tools/internal/versions/versions.go b/vendor/golang.org/x/tools/internal/versions/versions.go
index e16f6c33a523..8d1f7453dbfc 100644
--- a/vendor/golang.org/x/tools/internal/versions/versions.go
+++ b/vendor/golang.org/x/tools/internal/versions/versions.go
@@ -4,6 +4,10 @@
 
 package versions
 
+import (
+ "strings"
+)
+
 // Note: If we use build tags to use go/versions when go >=1.22,
 // we run into go.dev/issue/53737. Under some operations users would see an
 // import of "go/versions" even if they would not compile the file.
@@ -45,6 +49,7 @@ func IsValid(x string) bool { return isValid(stripGo(x)) }
 // stripGo converts from a "go1.21" version to a "1.21" version.
 // If v does not start with "go", stripGo returns the empty string (a known invalid version).
 func stripGo(v string) string {
+ v, _, _ = strings.Cut(v, "-") // strip -bigcorp suffix.
 if len(v) < 2 || v[:2] != "go" {
 return ""
 }
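The internal versions package closely parallels what later shipped in the standard library as go/version (Go 1.22+). An illustrative sketch of the same Lang/Compare/IsValid semantics, including the vendor-suffix stripping the hunk above adds (the "go1.21-bigcorp" input is a made-up example):

package main

import (
	"fmt"
	"go/version"
	"strings"
)

func main() {
	v := "go1.21-bigcorp"
	v, _, _ = strings.Cut(v, "-") // same idea as the stripGo change above
	fmt.Println(version.IsValid(v))                      // true
	fmt.Println(version.Lang("go1.21.3"))                // go1.21
	fmt.Println(version.Compare("go1.21", "go1.22") < 0) // true
}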
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/meta.go
new file mode 100644
index 000000000000..407a7419a690
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/meta.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/yaml"
+)
+
+// DefaultMetaFactory is a default factory for versioning objects in JSON or
+// YAML. The object in memory and in the default serialization will use the
+// "kind" and "apiVersion" fields.
+var DefaultMetaFactory = SimpleMetaFactory{}
+
+// SimpleMetaFactory provides default methods for retrieving the type and version of objects
+// that are identified with the "apiVersion" and "kind" fields in their JSON
+// serialization. It may be parameterized with the names of the fields in memory, or an
+// optional list of base structs to search for those fields in memory.
+type SimpleMetaFactory struct{}
+
+// Interpret will return the APIVersion and Kind of the JSON wire-format
+// encoding of an object, or an error.
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) {
+ gvk := runtime.TypeMeta{}
+ if err := yaml.Unmarshal(data, &gvk); err != nil {
+ return nil, fmt.Errorf("could not interpret GroupVersionKind; unmarshal error: %v", err)
+ }
+ gv, err := schema.ParseGroupVersion(gvk.APIVersion)
+ if err != nil {
+ return nil, err
+ }
+ return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: gvk.Kind}, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go
new file mode 100644
index 000000000000..2fdd1d43d52f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/yaml/yaml.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+// yamlSerializer converts YAML passed to the Decoder methods to JSON.
+type yamlSerializer struct {
+ // the nested serializer
+ runtime.Serializer
+}
+
+// yamlSerializer implements Serializer
+var _ runtime.Serializer = yamlSerializer{}
+
+// NewDecodingSerializer adds YAML decoding support to a serializer that supports JSON.
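An illustrative sketch (not part of the patch) of how these two additions fit together: DefaultMetaFactory.Interpret sniffs the group/version/kind from raw YAML, and NewDecodingSerializer (defined just below) wraps a JSON serializer so the same bytes can be fully decoded. The import paths are the ones this diff vendors; the ConfigMap manifest is a made-up input.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/json"
	serializeryaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
	clientscheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	manifest := []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: demo\n")

	// Step 1: identify the object's group/version/kind without full decoding.
	gvk, err := serializeryaml.DefaultMetaFactory.Interpret(manifest)
	if err != nil {
		panic(err)
	}
	fmt.Printf("sniffed %s\n", gvk)

	// Step 2: decode the YAML by converting it to JSON and delegating to a
	// JSON serializer backed by the client-go scheme.
	jsonSerializer := json.NewSerializerWithOptions(
		json.DefaultMetaFactory, clientscheme.Scheme, clientscheme.Scheme,
		json.SerializerOptions{},
	)
	decoder := serializeryaml.NewDecodingSerializer(jsonSerializer)
	obj, _, err := decoder.Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T\n", obj) // *v1.ConfigMap
}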
+func NewDecodingSerializer(jsonSerializer runtime.Serializer) runtime.Serializer { + return &yamlSerializer{jsonSerializer} +} + +func (c yamlSerializer) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + out, err := yaml.ToJSON(data) + if err != nil { + return nil, nil, err + } + data = out + return c.Serializer.Decode(data, gvk, into) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go new file mode 100644 index 000000000000..e48af65d3fbb --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/clientset_generated.go @@ -0,0 +1,92 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/discovery" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/testing" + clientset "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + fakeapiregistrationv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake" + apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1" + fakeapiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake" +) + +// NewSimpleClientset returns a clientset that will respond with the provided objects. +// It's backed by a very simple object tracker that processes creates, updates and deletions as-is, +// without applying any validations and/or defaults. It shouldn't be considered a replacement +// for a real clientset and is mostly useful in simple unit tests. +func NewSimpleClientset(objects ...runtime.Object) *Clientset { + o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) + for _, obj := range objects { + if err := o.Add(obj); err != nil { + panic(err) + } + } + + cs := &Clientset{tracker: o} + cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} + cs.AddReactor("*", "*", testing.ObjectReaction(o)) + cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { + gvr := action.GetResource() + ns := action.GetNamespace() + watch, err := o.Watch(gvr, ns) + if err != nil { + return false, nil, err + } + return true, watch, nil + }) + + return cs +} + +// Clientset implements clientset.Interface. Meant to be embedded into a +// struct to get a default implementation. This makes faking out just the method +// you want to test easier. 
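A sketch of the intended use of the fake clientset added above: seed it with objects, then exercise client code against it in a unit test. This is an illustrative test, not part of the generated package; names and the seeded APIService are made up.

package fake_test

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
	"k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake"
)

func TestAPIServiceGet(t *testing.T) {
	// Seed the tracker with an existing object; the fake clientset serves it
	// back through the normal typed interface.
	seed := &apiregistrationv1.APIService{
		ObjectMeta: metav1.ObjectMeta{Name: "v1beta1.metrics.k8s.io"},
	}
	cs := fake.NewSimpleClientset(seed)

	got, err := cs.ApiregistrationV1().APIServices().Get(
		context.TODO(), "v1beta1.metrics.k8s.io", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Get: %v", err)
	}
	if got.Name != seed.Name {
		t.Fatalf("got %q, want %q", got.Name, seed.Name)
	}
}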
+type Clientset struct { + testing.Fake + discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker +} + +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + return c.discovery +} + +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + +var ( + _ clientset.Interface = &Clientset{} + _ testing.FakeClient = &Clientset{} +) + +// ApiregistrationV1 retrieves the ApiregistrationV1Client +func (c *Clientset) ApiregistrationV1() apiregistrationv1.ApiregistrationV1Interface { + return &fakeapiregistrationv1.FakeApiregistrationV1{Fake: &c.Fake} +} + +// ApiregistrationV1beta1 retrieves the ApiregistrationV1beta1Client +func (c *Clientset) ApiregistrationV1beta1() apiregistrationv1beta1.ApiregistrationV1beta1Interface { + return &fakeapiregistrationv1beta1.FakeApiregistrationV1beta1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go new file mode 100644 index 000000000000..9b99e7167091 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated fake clientset. +package fake diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go new file mode 100644 index 000000000000..880c67a3979a --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/fake/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" +) + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +var localSchemeBuilder = runtime.SchemeBuilder{ + apiregistrationv1.AddToScheme, + apiregistrationv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiregistration_client.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiregistration_client.go new file mode 100644 index 000000000000..3e563e6f07dd --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiregistration_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" +) + +type FakeApiregistrationV1 struct { + *testing.Fake +} + +func (c *FakeApiregistrationV1) APIServices() v1.APIServiceInterface { + return &FakeAPIServices{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeApiregistrationV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go new file mode 100644 index 000000000000..88c87954a5fd --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/fake/fake_apiservice.go @@ -0,0 +1,132 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" +) + +// FakeAPIServices implements APIServiceInterface +type FakeAPIServices struct { + Fake *FakeApiregistrationV1 +} + +var apiservicesResource = v1.SchemeGroupVersion.WithResource("apiservices") + +var apiservicesKind = v1.SchemeGroupVersion.WithKind("APIService") + +// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. +func (c *FakeAPIServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(apiservicesResource, name), &v1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1.APIService), err +} + +// List takes label and field selectors, and returns the list of APIServices that match those selectors. +func (c *FakeAPIServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServiceList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootListAction(apiservicesResource, apiservicesKind, opts), &v1.APIServiceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1.APIServiceList{ListMeta: obj.(*v1.APIServiceList).ListMeta} + for _, item := range obj.(*v1.APIServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested aPIServices. +func (c *FakeAPIServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(apiservicesResource, opts)) +} + +// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. +func (c *FakeAPIServices) Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (result *v1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(apiservicesResource, aPIService), &v1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1.APIService), err +} + +// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. +func (c *FakeAPIServices) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(apiservicesResource, aPIService), &v1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1.APIService), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(apiservicesResource, "status", aPIService), &v1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1.APIService), err +} + +// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. +func (c *FakeAPIServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(apiservicesResource, name, opts), &v1.APIService{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAPIServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(apiservicesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1.APIServiceList{}) + return err +} + +// Patch applies the patch and returns the patched aPIService. +func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(apiservicesResource, name, pt, data, subresources...), &v1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1.APIService), err +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go new file mode 100644 index 000000000000..9b32316af6ae --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiregistration_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1beta1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1" +) + +type FakeApiregistrationV1beta1 struct { + *testing.Fake +} + +func (c *FakeApiregistrationV1beta1) APIServices() v1beta1.APIServiceInterface { + return &FakeAPIServices{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeApiregistrationV1beta1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go new file mode 100644 index 000000000000..4c5a1868a9bf --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1/fake/fake_apiservice.go @@ -0,0 +1,132 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + "context" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + v1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" +) + +// FakeAPIServices implements APIServiceInterface +type FakeAPIServices struct { + Fake *FakeApiregistrationV1beta1 +} + +var apiservicesResource = v1beta1.SchemeGroupVersion.WithResource("apiservices") + +var apiservicesKind = v1beta1.SchemeGroupVersion.WithKind("APIService") + +// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. +func (c *FakeAPIServices) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(apiservicesResource, name), &v1beta1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.APIService), err +} + +// List takes label and field selectors, and returns the list of APIServices that match those selectors. +func (c *FakeAPIServices) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.APIServiceList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(apiservicesResource, apiservicesKind, opts), &v1beta1.APIServiceList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1beta1.APIServiceList{ListMeta: obj.(*v1beta1.APIServiceList).ListMeta} + for _, item := range obj.(*v1beta1.APIServiceList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested aPIServices. +func (c *FakeAPIServices) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(apiservicesResource, opts)) +} + +// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. 
+func (c *FakeAPIServices) Create(ctx context.Context, aPIService *v1beta1.APIService, opts v1.CreateOptions) (result *v1beta1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(apiservicesResource, aPIService), &v1beta1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.APIService), err +} + +// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. +func (c *FakeAPIServices) Update(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (result *v1beta1.APIService, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(apiservicesResource, aPIService), &v1beta1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.APIService), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeAPIServices) UpdateStatus(ctx context.Context, aPIService *v1beta1.APIService, opts v1.UpdateOptions) (*v1beta1.APIService, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(apiservicesResource, "status", aPIService), &v1beta1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.APIService), err +} + +// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. +func (c *FakeAPIServices) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(apiservicesResource, name, opts), &v1beta1.APIService{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeAPIServices) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(apiservicesResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1beta1.APIServiceList{}) + return err +} + +// Patch applies the patch and returns the patched aPIService. +func (c *FakeAPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.APIService, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootPatchSubresourceAction(apiservicesResource, name, pt, data, subresources...), &v1beta1.APIService{}) + if obj == nil { + return nil, err + } + return obj.(*v1beta1.APIService), err +} diff --git a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go index 743f5b8b2e1a..1ffcf9094913 100644 --- a/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go +++ b/vendor/k8s.io/kube-openapi/pkg/generators/openapi.go @@ -704,7 +704,7 @@ func defaultFromComments(comments []string, commentPath string, t *types.Type) ( var i interface{} if id, ok := parseSymbolReference(tag, commentPath); ok { - klog.Errorf("%v, %v", id, commentPath) + klog.V(5).Infof("%v, %v", id, commentPath) return nil, &id, nil } else if err := json.Unmarshal([]byte(tag), &i); err != nil { return nil, nil, fmt.Errorf("failed to unmarshal default: %v", err) @@ -844,15 +844,9 @@ func (g openAPITypeWriter) generateDescription(CommentLines []string) { } } - postDoc := strings.TrimLeft(buffer.String(), "\n") - postDoc = strings.TrimRight(postDoc, "\n") - postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " - postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " - postDoc = strings.Replace(postDoc, "\n", "\\n", -1) - postDoc = strings.Replace(postDoc, "\t", "\\t", -1) - postDoc = strings.Trim(postDoc, " ") - if postDoc != "" { - g.Do("Description: \"$.$\",\n", postDoc) + postDoc := strings.TrimSpace(buffer.String()) + if len(postDoc) > 0 { + g.Do("Description: $.$,\n", fmt.Sprintf("%#v", postDoc)) } } diff --git a/vendor/k8s.io/kubectl/pkg/cmd/annotate/annotate.go b/vendor/k8s.io/kubectl/pkg/cmd/annotate/annotate.go new file mode 100644 index 000000000000..aeeb5cea372f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/annotate/annotate.go @@ -0,0 +1,485 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package annotate + +import ( + "bytes" + "fmt" + "io" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + + "k8s.io/client-go/tools/clientcmd" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/polymorphichelpers" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// AnnotateFlags directly reflect the information that CLI is gathering via flags. They will be converted to Options, which +// reflect the runtime requirements for the command. 
This structure reduces the transformation to wiring and makes +// the logic itself easy to unit test +type AnnotateFlags struct { + // Common user flags + All bool + AllNamespaces bool + DryRunStrategy cmdutil.DryRunStrategy + FieldManager string + FieldSelector string + resource.FilenameOptions + List bool + Local bool + OutputFormat string + overwrite bool + PrintFlags *genericclioptions.PrintFlags + RecordFlags *genericclioptions.RecordFlags + resourceVersion string + Selector string + + genericiooptions.IOStreams +} + +// NewAnnotateFlags returns a default AnnotateFlags +func NewAnnotateFlags(streams genericiooptions.IOStreams) *AnnotateFlags { + return &AnnotateFlags{ + PrintFlags: genericclioptions.NewPrintFlags("annotated").WithTypeSetter(scheme.Scheme), + RecordFlags: genericclioptions.NewRecordFlags(), + IOStreams: streams, + } +} + +// AnnotateOptions have the data required to perform the annotate operation +type AnnotateOptions struct { + all bool + allNamespaces bool + + builder *resource.Builder + dryRunStrategy cmdutil.DryRunStrategy + + enforceNamespace bool + fieldSelector string + fieldManager string + resource.FilenameOptions + + genericiooptions.IOStreams + + list bool + local bool + namespace string + newAnnotations map[string]string + overwrite bool + + PrintObj printers.ResourcePrinterFunc + + Recorder genericclioptions.Recorder + resources []string + resourceVersion string + removeAnnotations []string + selector string + + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) +} + +var ( + annotateLong = templates.LongDesc(i18n.T(` + Update the annotations on one or more resources. + + All Kubernetes objects support the ability to store additional data with the object as + annotations. Annotations are key/value pairs that can be larger than labels and include + arbitrary string values such as structured JSON. Tools and system extensions may use + annotations to store their own data. + + Attempting to set an annotation that already exists will fail unless --overwrite is set. + If --resource-version is specified and does not match the current resource version on + the server the command will fail.`)) + + annotateExample = templates.Examples(i18n.T(` + # Update pod 'foo' with the annotation 'description' and the value 'my frontend' + # If the same annotation is set multiple times, only the last value will be applied + kubectl annotate pods foo description='my frontend' + + # Update a pod identified by type and name in "pod.json" + kubectl annotate -f pod.json description='my frontend' + + # Update pod 'foo' with the annotation 'description' and the value 'my frontend running nginx', overwriting any existing value + kubectl annotate --overwrite pods foo description='my frontend running nginx' + + # Update all pods in the namespace + kubectl annotate pods --all description='my frontend running nginx' + + # Update pod 'foo' only if the resource is unchanged from version 1 + kubectl annotate pods foo description='my frontend running nginx' --resource-version=1 + + # Update pod 'foo' by removing an annotation named 'description' if it exists + # Does not require the --overwrite flag + kubectl annotate pods foo description-`)) +) + +// NewCmdAnnotate creates the `annotate` command +func NewCmdAnnotate(parent string, f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command { + flags := NewAnnotateFlags(streams) + + cmd := &cobra.Command{ + Use: "annotate [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... 
KEY_N=VAL_N [--resource-version=version]", + DisableFlagsInUseLine: true, + Short: i18n.T("Update the annotations on a resource"), + Long: annotateLong + "\n\n" + cmdutil.SuggestAPIResources(parent), + Example: annotateExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + o, err := flags.ToOptions(f, cmd, args) + cmdutil.CheckErr(err) + cmdutil.CheckErr(o.RunAnnotate()) + }, + } + + flags.AddFlags(cmd, streams) + + return cmd +} + +// AddFlags registers flags for a cli. +func (flags *AnnotateFlags) AddFlags(cmd *cobra.Command, ioStreams genericiooptions.IOStreams) { + flags.PrintFlags.AddFlags(cmd) + flags.RecordFlags.AddFlags(cmd) + + cmdutil.AddDryRunFlag(cmd) + + usage := "identifying the resource to update the annotation" + cmdutil.AddFilenameOptionFlags(cmd, &flags.FilenameOptions, usage) + cmdutil.AddFieldManagerFlagVar(cmd, &flags.FieldManager, "kubectl-annotate") + cmdutil.AddLabelSelectorFlagVar(cmd, &flags.Selector) + + cmd.Flags().BoolVar(&flags.overwrite, "overwrite", flags.overwrite, "If true, allow annotations to be overwritten, otherwise reject annotation updates that overwrite existing annotations.") + cmd.Flags().BoolVar(&flags.List, "list", flags.List, "If true, display the annotations for a given resource.") + cmd.Flags().BoolVar(&flags.Local, "local", flags.Local, "If true, annotation will NOT contact api-server but run locally.") + cmd.Flags().StringVar(&flags.FieldSelector, "field-selector", flags.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + cmd.Flags().BoolVar(&flags.All, "all", flags.All, "Select all resources, in the namespace of the specified resource types.") + cmd.Flags().BoolVarP(&flags.AllNamespaces, "all-namespaces", "A", flags.AllNamespaces, "If true, check the specified action in all namespaces.") + cmd.Flags().StringVar(&flags.resourceVersion, "resource-version", flags.resourceVersion, i18n.T("If non-empty, the annotation update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.")) +} + +// ToOptions converts from CLI inputs to runtime inputs. 
+func (flags *AnnotateFlags) ToOptions(f cmdutil.Factory, cmd *cobra.Command, args []string) (*AnnotateOptions, error) { + options := &AnnotateOptions{ + all: flags.All, + allNamespaces: flags.AllNamespaces, + FilenameOptions: flags.FilenameOptions, + fieldSelector: flags.FieldSelector, + fieldManager: flags.FieldManager, + IOStreams: flags.IOStreams, + local: flags.Local, + list: flags.List, + overwrite: flags.overwrite, + resourceVersion: flags.resourceVersion, + Recorder: genericclioptions.NoopRecorder{}, + selector: flags.Selector, + } + + var err error + + flags.RecordFlags.Complete(cmd) + options.Recorder, err = flags.RecordFlags.ToRecorder() + if err != nil { + return nil, err + } + + options.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return nil, err + } + + cmdutil.PrintFlagsWithDryRunStrategy(flags.PrintFlags, options.dryRunStrategy) + printer, err := flags.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + options.PrintObj = func(obj runtime.Object, out io.Writer) error { + return printer.PrintObj(obj, out) + } + + options.namespace, options.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil && !(options.local && clientcmd.IsEmptyConfig(err)) { + return nil, err + } + options.builder = f.NewBuilder() + options.unstructuredClientForMapping = f.UnstructuredClientForMapping + + // retrieves resource and annotation args from args + // also checks args to verify that all resources are specified before annotations + resources, annotationArgs, err := cmdutil.GetResourcesAndPairs(args, "annotation") + if err != nil { + return nil, err + } + options.resources = resources + options.newAnnotations, options.removeAnnotations, err = parseAnnotations(annotationArgs) + if err != nil { + return nil, err + } + + // Checks the options and flags to see if there is sufficient information run the command. + if flags.List && len(flags.OutputFormat) > 0 { + return nil, fmt.Errorf("--list and --output may not be specified together") + } + if flags.All && len(flags.Selector) > 0 { + return nil, fmt.Errorf("cannot set --all and --selector at the same time") + } + if flags.All && len(flags.FieldSelector) > 0 { + return nil, fmt.Errorf("cannot set --all and --field-selector at the same time") + } + + if !flags.Local { + if len(options.resources) < 1 && cmdutil.IsFilenameSliceEmpty(flags.Filenames, flags.Kustomize) { + return nil, fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>") + } + } else { + if options.dryRunStrategy == cmdutil.DryRunServer { + return nil, fmt.Errorf("cannot specify --local and --dry-run=server - did you mean --dry-run=client?") + } + if len(options.resources) > 0 { + return nil, fmt.Errorf("can only use local files by -f rsrc.yaml or --filename=rsrc.json when --local=true is set") + } + if cmdutil.IsFilenameSliceEmpty(flags.Filenames, flags.Kustomize) { + return nil, fmt.Errorf("one or more files must be specified as -f rsrc.yaml or --filename=rsrc.json") + } + } + if len(options.newAnnotations) < 1 && len(options.removeAnnotations) < 1 && !flags.List { + return nil, fmt.Errorf("at least one annotation update is required") + } + err = validateAnnotations(options.removeAnnotations, options.newAnnotations) + if err != nil { + return nil, err + } + + return options, nil +} + +// RunAnnotate does the work +func (o AnnotateOptions) RunAnnotate() error { + b := o.builder. + Unstructured(). + LocalParam(o.local). + ContinueOnError(). + NamespaceParam(o.namespace).DefaultNamespace().
+ FilenameParam(o.enforceNamespace, &o.FilenameOptions). + Flatten() + + if !o.local { + b = b.LabelSelectorParam(o.selector). + FieldSelectorParam(o.fieldSelector). + AllNamespaces(o.allNamespaces). + ResourceTypeOrNameArgs(o.all, o.resources...). + Latest() + } + + r := b.Do() + if err := r.Err(); err != nil { + return err + } + + var singleItemImpliedResource bool + r.IntoSingleItemImplied(&singleItemImpliedResource) + + // only apply resource version locking on a single resource. + // we must perform this check after o.builder.Do() as + // []o.resources can not accurately return the proper number + // of resources when they are not passed in "resource/name" format. + if !singleItemImpliedResource && len(o.resourceVersion) > 0 { + return fmt.Errorf("--resource-version may only be used with a single resource") + } + + return r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + var outputObj runtime.Object + obj := info.Object + + if o.dryRunStrategy == cmdutil.DryRunClient || o.local || o.list { + if err := o.updateAnnotations(obj); err != nil { + return err + } + outputObj = obj + } else { + mapping := info.ResourceMapping() + name, namespace := info.Name, info.Namespace + + if len(o.resourceVersion) != 0 { + // ensure resourceVersion is always sent in the patch by clearing it from the starting JSON + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion("") + } + + oldData, err := json.Marshal(obj) + if err != nil { + return err + } + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + if err := o.updateAnnotations(obj); err != nil { + return err + } + newData, err := json.Marshal(obj) + if err != nil { + return err + } + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) + createdPatch := err == nil + if err != nil { + klog.V(2).Infof("couldn't compute patch: %v", err) + } + + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + helper := resource. + NewHelper(client, mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). 
+ WithFieldManager(o.fieldManager) + + if createdPatch { + outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) + } else { + outputObj, err = helper.Replace(namespace, name, false, obj) + } + if err != nil { + return err + } + } + + if o.list { + accessor, err := meta.Accessor(outputObj) + if err != nil { + return err + } + + indent := "" + if !singleItemImpliedResource { + indent = " " + gvks, _, err := unstructuredscheme.NewUnstructuredObjectTyper().ObjectKinds(info.Object) + if err != nil { + return err + } + fmt.Fprintf(o.Out, "Listing annotations for %s.%s/%s:\n", gvks[0].Kind, gvks[0].Group, info.Name) + } + for k, v := range accessor.GetAnnotations() { + fmt.Fprintf(o.Out, "%s%s=%s\n", indent, k, v) + } + + return nil + } + + return o.PrintObj(outputObj, o.Out) + }) +} + +// parseAnnotations retrieves new and remove annotations from annotation args +func parseAnnotations(annotationArgs []string) (map[string]string, []string, error) { + return cmdutil.ParsePairs(annotationArgs, "annotation", true) +} + +// validateAnnotations checks the format of annotation args and checks removed annotations aren't in the new annotations map +func validateAnnotations(removeAnnotations []string, newAnnotations map[string]string) error { + var modifyRemoveBuf bytes.Buffer + for _, removeAnnotation := range removeAnnotations { + if _, found := newAnnotations[removeAnnotation]; found { + if modifyRemoveBuf.Len() > 0 { + modifyRemoveBuf.WriteString(", ") + } + modifyRemoveBuf.WriteString(fmt.Sprint(removeAnnotation)) + } + } + if modifyRemoveBuf.Len() > 0 { + return fmt.Errorf("can not both modify and remove the following annotation(s) in the same command: %s", modifyRemoveBuf.String()) + } + + return nil +} + +// validateNoAnnotationOverwrites validates that when overwrite is false, to-be-updated annotations don't exist in the object annotation map (yet) +func validateNoAnnotationOverwrites(accessor metav1.Object, annotations map[string]string) error { + var buf bytes.Buffer + for key, value := range annotations { + // change-cause annotation can always be overwritten + if key == polymorphichelpers.ChangeCauseAnnotation { + continue + } + if currValue, found := accessor.GetAnnotations()[key]; found && currValue != value { + if buf.Len() > 0 { + buf.WriteString("; ") + } + buf.WriteString(fmt.Sprintf("'%s' already has a value (%s)", key, currValue)) + } + } + if buf.Len() > 0 { + return fmt.Errorf("--overwrite is false but found the following declared annotation(s): %s", buf.String()) + } + return nil +} + +// updateAnnotations updates annotations of obj +func (o AnnotateOptions) updateAnnotations(obj runtime.Object) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + if !o.overwrite { + if err := validateNoAnnotationOverwrites(accessor, o.newAnnotations); err != nil { + return err + } + } + + annotations := accessor.GetAnnotations() + if annotations == nil { + annotations = make(map[string]string) + } + + for key, value := range o.newAnnotations { + annotations[key] = value + } + for _, annotation := range o.removeAnnotations { + delete(annotations, annotation) + } + accessor.SetAnnotations(annotations) + + if len(o.resourceVersion) != 0 { + accessor.SetResourceVersion(o.resourceVersion) + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/attach/attach.go b/vendor/k8s.io/kubectl/pkg/cmd/attach/attach.go new file mode 100644 index 000000000000..c1e8d83dc545 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/attach/attach.go @@ -0,0 
+1,356 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package attach + +import ( + "context" + "fmt" + "io" + "net/url" + "strings" + "time" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/remotecommand" + "k8s.io/kubectl/pkg/cmd/exec" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/podcmd" + "k8s.io/kubectl/pkg/polymorphichelpers" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + attachExample = templates.Examples(i18n.T(` + # Get output from running pod mypod; use the 'kubectl.kubernetes.io/default-container' annotation + # for selecting the container to be attached or the first container in the pod will be chosen + kubectl attach mypod + + # Get output from ruby-container from pod mypod + kubectl attach mypod -c ruby-container + + # Switch to raw terminal mode; sends stdin to 'bash' in ruby-container from pod mypod + # and sends stdout/stderr from 'bash' back to the client + kubectl attach mypod -c ruby-container -i -t + + # Get output from the first pod of a replica set named nginx + kubectl attach rs/nginx + `)) +) + +const ( + defaultPodAttachTimeout = 60 * time.Second + defaultPodLogsTimeout = 20 * time.Second +) + +// AttachOptions declare the arguments accepted by the Attach command +type AttachOptions struct { + exec.StreamOptions + + // whether to disable use of standard error when streaming output from tty + DisableStderr bool + + CommandName string + + Pod *corev1.Pod + + AttachFunc func(*AttachOptions, *corev1.Container, bool, remotecommand.TerminalSizeQueue) func() error + Resources []string + Builder func() *resource.Builder + AttachablePodFn polymorphichelpers.AttachablePodForObjectFunc + restClientGetter genericclioptions.RESTClientGetter + + Attach RemoteAttach + GetPodTimeout time.Duration + Config *restclient.Config +} + +// NewAttachOptions creates the options for attach +func NewAttachOptions(streams genericiooptions.IOStreams) *AttachOptions { + return &AttachOptions{ + StreamOptions: exec.StreamOptions{ + IOStreams: streams, + }, + Attach: &DefaultRemoteAttach{}, + AttachFunc: DefaultAttachFunc, + } +} + +// NewCmdAttach returns the attach Cobra command +func NewCmdAttach(f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command { + o := NewAttachOptions(streams) + cmd := &cobra.Command{ + Use: "attach (POD | TYPE/NAME) -c CONTAINER", + DisableFlagsInUseLine: true, + Short: i18n.T("Attach to a running container"), + Long: i18n.T("Attach to a process that is already running inside an existing container."), + Example: attachExample, + ValidArgsFunction: completion.PodResourceNameCompletionFunc(f), + Run: func(cmd 
*cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodAttachTimeout) + cmdutil.AddContainerVarFlags(cmd, &o.ContainerName, o.ContainerName) + cmd.Flags().BoolVarP(&o.Stdin, "stdin", "i", o.Stdin, "Pass stdin to the container") + cmd.Flags().BoolVarP(&o.TTY, "tty", "t", o.TTY, "Stdin is a TTY") + cmd.Flags().BoolVarP(&o.Quiet, "quiet", "q", o.Quiet, "Only print output from the remote session") + return cmd +} + +// RemoteAttach defines the interface accepted by the Attach command - provided for test stubbing +type RemoteAttach interface { + Attach(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error +} + +// DefaultAttachFunc is the default AttachFunc used +func DefaultAttachFunc(o *AttachOptions, containerToAttach *corev1.Container, raw bool, sizeQueue remotecommand.TerminalSizeQueue) func() error { + return func() error { + restClient, err := restclient.RESTClientFor(o.Config) + if err != nil { + return err + } + req := restClient.Post(). + Resource("pods"). + Name(o.Pod.Name). + Namespace(o.Pod.Namespace). + SubResource("attach") + req.VersionedParams(&corev1.PodAttachOptions{ + Container: containerToAttach.Name, + Stdin: o.Stdin, + Stdout: o.Out != nil, + Stderr: !o.DisableStderr, + TTY: raw, + }, scheme.ParameterCodec) + + return o.Attach.Attach(req.URL(), o.Config, o.In, o.Out, o.ErrOut, raw, sizeQueue) + } +} + +// DefaultRemoteAttach is the standard implementation of attaching +type DefaultRemoteAttach struct{} + +// Attach executes attach to a running container +func (*DefaultRemoteAttach) Attach(url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool, terminalSizeQueue remotecommand.TerminalSizeQueue) error { + exec, err := createExecutor(url, config) + if err != nil { + return err + } + return exec.StreamWithContext(context.Background(), remotecommand.StreamOptions{ + Stdin: stdin, + Stdout: stdout, + Stderr: stderr, + Tty: tty, + TerminalSizeQueue: terminalSizeQueue, + }) +} + +// createExecutor returns the Executor or an error if one occurred. +func createExecutor(url *url.URL, config *restclient.Config) (remotecommand.Executor, error) { + exec, err := remotecommand.NewSPDYExecutor(config, "POST", url) + if err != nil { + return nil, err + } + // Fallback executor is default, unless feature flag is explicitly disabled. + if !cmdutil.RemoteCommandWebsockets.IsDisabled() { + // WebSocketExecutor must be "GET" method as described in RFC 6455 Sec. 4.1 (page 17). 
+ websocketExec, err := remotecommand.NewWebSocketExecutor(config, "GET", url.String()) + if err != nil { + return nil, err + } + exec, err = remotecommand.NewFallbackExecutor(websocketExec, exec, httpstream.IsUpgradeFailure) + if err != nil { + return nil, err + } + } + return exec, nil +} + +// Complete verifies command line arguments and loads data from the command environment +func (o *AttachOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.AttachablePodFn = polymorphichelpers.AttachablePodForObjectFn + + o.GetPodTimeout, err = cmdutil.GetPodRunningTimeoutFlag(cmd) + if err != nil { + return cmdutil.UsageErrorf(cmd, err.Error()) + } + + o.Builder = f.NewBuilder + o.Resources = args + o.restClientGetter = f + + config, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Config = config + + if o.CommandName == "" { + o.CommandName = cmd.CommandPath() + } + + return nil +} + +// Validate checks that the provided attach options are specified. +func (o *AttachOptions) Validate() error { + if len(o.Resources) == 0 { + return fmt.Errorf("at least 1 argument is required for attach") + } + if len(o.Resources) > 2 { + return fmt.Errorf("expected POD, TYPE/NAME, or TYPE NAME, (at most 2 arguments) saw %d: %v", len(o.Resources), o.Resources) + } + if o.GetPodTimeout <= 0 { + return fmt.Errorf("--pod-running-timeout must be higher than zero") + } + + return nil +} + +// Run executes a validated remote execution against a pod. +func (o *AttachOptions) Run() error { + if o.Pod == nil { + b := o.Builder(). + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). + NamespaceParam(o.Namespace).DefaultNamespace() + + switch len(o.Resources) { + case 1: + b.ResourceNames("pods", o.Resources[0]) + case 2: + b.ResourceNames(o.Resources[0], o.Resources[1]) + } + + obj, err := b.Do().Object() + if err != nil { + return err + } + + o.Pod, err = o.findAttachablePod(obj) + if err != nil { + return err + } + + if o.Pod.Status.Phase == corev1.PodSucceeded || o.Pod.Status.Phase == corev1.PodFailed { + return fmt.Errorf("cannot attach a container in a completed pod; current phase is %s", o.Pod.Status.Phase) + } + // TODO: convert this to a clean "wait" behavior + } + + // check for TTY + containerToAttach, err := o.containerToAttachTo(o.Pod) + if err != nil { + return fmt.Errorf("cannot attach to the container: %v", err) + } + if o.TTY && !containerToAttach.TTY { + o.TTY = false + if !o.Quiet && o.ErrOut != nil { + fmt.Fprintf(o.ErrOut, "error: Unable to use a TTY - container %s did not allocate one\n", containerToAttach.Name) + } + } else if !o.TTY && containerToAttach.TTY { + // the container was launched with a TTY, so we have to force a TTY here, otherwise you'll get + // an error "Unrecognized input header" + o.TTY = true + } + + // ensure we can recover the terminal while attached + t := o.SetupTTY() + + var sizeQueue remotecommand.TerminalSizeQueue + if t.Raw { + if size := t.GetSize(); size != nil { + // fake resizing +1 and then back to normal so that attach-detach-reattach will result in the + // screen being redrawn + sizePlusOne := *size + sizePlusOne.Width++ + sizePlusOne.Height++ + + // this call spawns a goroutine to monitor/update the terminal size + sizeQueue = t.MonitorSize(&sizePlusOne, size) + } + + o.DisableStderr = true + } + + if !o.Quiet { + fmt.Fprintln(o.ErrOut, "If you don't see a command prompt, try pressing 
enter.") + } + if err := t.Safe(o.AttachFunc(o, containerToAttach, t.Raw, sizeQueue)); err != nil { + return err + } + + if msg := o.reattachMessage(containerToAttach.Name, t.Raw); msg != "" { + fmt.Fprintln(o.Out, msg) + } + return nil +} + +func (o *AttachOptions) findAttachablePod(obj runtime.Object) (*corev1.Pod, error) { + attachablePod, err := o.AttachablePodFn(o.restClientGetter, obj, o.GetPodTimeout) + if err != nil { + return nil, err + } + + o.StreamOptions.PodName = attachablePod.Name + return attachablePod, nil +} + +// containerToAttach returns a reference to the container to attach to, given by name. +// use the kubectl.kubernetes.io/default-container annotation for selecting the container to be attached +// or the first container in the pod will be chosen If name is empty. +func (o *AttachOptions) containerToAttachTo(pod *corev1.Pod) (*corev1.Container, error) { + return podcmd.FindOrDefaultContainerByName(pod, o.ContainerName, o.Quiet, o.ErrOut) +} + +// GetContainerName returns the name of the container to attach to, with a fallback. +func (o *AttachOptions) GetContainerName(pod *corev1.Pod) (string, error) { + c, err := o.containerToAttachTo(pod) + if err != nil { + return "", err + } + return c.Name, nil +} + +// reattachMessage returns a message to print after attach has completed, or +// the empty string if no message should be printed. +func (o *AttachOptions) reattachMessage(containerName string, rawTTY bool) string { + if o.Quiet || !o.Stdin || !rawTTY || o.Pod.Spec.RestartPolicy != corev1.RestartPolicyAlways { + return "" + } + if _, path := podcmd.FindContainerByName(o.Pod, containerName); strings.HasPrefix(path, "spec.ephemeralContainers") { + return fmt.Sprintf("Session ended, the ephemeral container will not be restarted but may be reattached using '%s %s -c %s -i -t' if it is still running", o.CommandName, o.Pod.Name, containerName) + } + return fmt.Sprintf("Session ended, resume using '%s %s -c %s -i -t' command when the pod is running", o.CommandName, o.Pod.Name, containerName) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create.go new file mode 100644 index 000000000000..17b21c4e08d3 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create.go @@ -0,0 +1,472 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + "io" + "net/url" + "runtime" + "strings" + + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + kruntime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/dynamic" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/generate" + "k8s.io/kubectl/pkg/rawhttp" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +// CreateOptions is the commandline options for 'create' sub command +type CreateOptions struct { + PrintFlags *genericclioptions.PrintFlags + RecordFlags *genericclioptions.RecordFlags + + DryRunStrategy cmdutil.DryRunStrategy + + ValidationDirective string + + fieldManager string + + FilenameOptions resource.FilenameOptions + Selector string + EditBeforeCreate bool + Raw string + + Recorder genericclioptions.Recorder + PrintObj func(obj kruntime.Object) error + + genericiooptions.IOStreams +} + +var ( + createLong = templates.LongDesc(i18n.T(` + Create a resource from a file or from stdin. + + JSON and YAML formats are accepted.`)) + + createExample = templates.Examples(i18n.T(` + # Create a pod using the data in pod.json + kubectl create -f ./pod.json + + # Create a pod based on the JSON passed into stdin + cat pod.json | kubectl create -f - + + # Edit the data in registry.yaml in JSON then create the resource using the edited data + kubectl create -f registry.yaml --edit -o json`)) +) + +// NewCreateOptions returns an initialized CreateOptions instance +func NewCreateOptions(ioStreams genericiooptions.IOStreams) *CreateOptions { + return &CreateOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + RecordFlags: genericclioptions.NewRecordFlags(), + + Recorder: genericclioptions.NoopRecorder{}, + + IOStreams: ioStreams, + } +} + +// NewCmdCreate returns new initialized instance of create sub command +func NewCmdCreate(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewCreateOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "create -f FILENAME", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a resource from a file or from stdin"), + Long: createLong, + Example: createExample, + Run: func(cmd *cobra.Command, args []string) { + if cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) { + ioStreams.ErrOut.Write([]byte("Error: must specify one of -f and -k\n\n")) + defaultRunFunc := cmdutil.DefaultSubCommandRun(ioStreams.ErrOut) + defaultRunFunc(cmd, args) + return + } + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunCreate(f, cmd)) + }, + } + + // bind flag structs + o.RecordFlags.AddFlags(cmd) + + usage := "to use to create the resource" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().BoolVar(&o.EditBeforeCreate, "edit", o.EditBeforeCreate, "Edit the API resource before creating") + cmd.Flags().Bool("windows-line-endings", runtime.GOOS == "windows", + "Only relevant if --edit=true. 
Defaults to the line ending native to your platform.") + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddLabelSelectorFlagVar(cmd, &o.Selector) + cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to POST to the server. Uses the transport specified by the kubeconfig file.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-create") + + o.PrintFlags.AddFlags(cmd) + + // create subcommands + cmd.AddCommand(NewCmdCreateNamespace(f, ioStreams)) + cmd.AddCommand(NewCmdCreateQuota(f, ioStreams)) + cmd.AddCommand(NewCmdCreateSecret(f, ioStreams)) + cmd.AddCommand(NewCmdCreateConfigMap(f, ioStreams)) + cmd.AddCommand(NewCmdCreateServiceAccount(f, ioStreams)) + cmd.AddCommand(NewCmdCreateService(f, ioStreams)) + cmd.AddCommand(NewCmdCreateDeployment(f, ioStreams)) + cmd.AddCommand(NewCmdCreateClusterRole(f, ioStreams)) + cmd.AddCommand(NewCmdCreateClusterRoleBinding(f, ioStreams)) + cmd.AddCommand(NewCmdCreateRole(f, ioStreams)) + cmd.AddCommand(NewCmdCreateRoleBinding(f, ioStreams)) + cmd.AddCommand(NewCmdCreatePodDisruptionBudget(f, ioStreams)) + cmd.AddCommand(NewCmdCreatePriorityClass(f, ioStreams)) + cmd.AddCommand(NewCmdCreateJob(f, ioStreams)) + cmd.AddCommand(NewCmdCreateCronJob(f, ioStreams)) + cmd.AddCommand(NewCmdCreateIngress(f, ioStreams)) + cmd.AddCommand(NewCmdCreateToken(f, ioStreams)) + return cmd +} + +// Validate makes sure there is no discrepency in command options +func (o *CreateOptions) Validate() error { + if len(o.Raw) > 0 { + if o.EditBeforeCreate { + return fmt.Errorf("--raw and --edit are mutually exclusive") + } + if len(o.FilenameOptions.Filenames) != 1 { + return fmt.Errorf("--raw can only use a single local file or stdin") + } + if strings.Index(o.FilenameOptions.Filenames[0], "http://") == 0 || strings.Index(o.FilenameOptions.Filenames[0], "https://") == 0 { + return fmt.Errorf("--raw cannot read from a url") + } + if o.FilenameOptions.Recursive { + return fmt.Errorf("--raw and --recursive are mutually exclusive") + } + if len(o.Selector) > 0 { + return fmt.Errorf("--raw and --selector (-l) are mutually exclusive") + } + if o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0 { + return fmt.Errorf("--raw and --output are mutually exclusive") + } + if _, err := url.ParseRequestURI(o.Raw); err != nil { + return fmt.Errorf("--raw must be a valid URL path: %v", err) + } + } + + return nil +} + +// Complete completes all the required options +func (o *CreateOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + if len(args) != 0 { + return cmdutil.UsageErrorf(cmd, "Unexpected args: %v", args) + } + var err error + o.RecordFlags.Complete(cmd) + o.Recorder, err = o.RecordFlags.ToRecorder() + if err != nil { + return err + } + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj kruntime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + return nil +} + +// RunCreate performs the creation +func (o *CreateOptions) RunCreate(f cmdutil.Factory, cmd *cobra.Command) error { + // raw only makes sense for a single file resource multiple objects aren't likely to do what you want. 
+ // the validator enforces this, so + if len(o.Raw) > 0 { + restClient, err := f.RESTClient() + if err != nil { + return err + } + return rawhttp.RawPost(restClient, o.IOStreams, o.Raw, o.FilenameOptions.Filenames[0]) + } + + if o.EditBeforeCreate { + return RunEditOnCreate(f, o.PrintFlags, o.RecordFlags, o.IOStreams, cmd, &o.FilenameOptions, o.fieldManager) + } + + schema, err := f.Validator(o.ValidationDirective) + if err != nil { + return err + } + + cmdNamespace, enforceNamespace, err := f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + r := f.NewBuilder(). + Unstructured(). + Schema(schema). + ContinueOnError(). + NamespaceParam(cmdNamespace).DefaultNamespace(). + FilenameParam(enforceNamespace, &o.FilenameOptions). + LabelSelectorParam(o.Selector). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + + count := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + if err := util.CreateOrUpdateAnnotation(cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag), info.Object, scheme.DefaultJSONEncoder()); err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + + if err := o.Recorder.Record(info.Object); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + obj, err := resource. + NewHelper(info.Client, info.Mapping). + DryRun(o.DryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). + WithFieldValidation(o.ValidationDirective). + Create(info.Namespace, true, info.Object) + if err != nil { + return cmdutil.AddSourceToErr("creating", info.Source, err) + } + info.Refresh(obj, true) + } + + count++ + + return o.PrintObj(info.Object) + }) + if err != nil { + return err + } + if count == 0 { + return fmt.Errorf("no objects passed to create") + } + return nil +} + +// RunEditOnCreate performs edit on creation +func RunEditOnCreate(f cmdutil.Factory, printFlags *genericclioptions.PrintFlags, recordFlags *genericclioptions.RecordFlags, ioStreams genericiooptions.IOStreams, cmd *cobra.Command, options *resource.FilenameOptions, fieldManager string) error { + editOptions := editor.NewEditOptions(editor.EditBeforeCreateMode, ioStreams) + editOptions.FilenameOptions = *options + validationDirective, err := cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + editOptions.ValidateOptions = cmdutil.ValidateOptions{ + ValidationDirective: string(validationDirective), + } + editOptions.PrintFlags = printFlags + editOptions.ApplyAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + editOptions.RecordFlags = recordFlags + editOptions.FieldManager = "kubectl-create" + + err = editOptions.Complete(f, []string{}, cmd) + if err != nil { + return err + } + return editOptions.Run() +} + +// NameFromCommandArgs is a utility function for commands that assume the first argument is a resource name +func NameFromCommandArgs(cmd *cobra.Command, args []string) (string, error) { + argsLen := cmd.ArgsLenAtDash() + // ArgsLenAtDash returns -1 when -- was not specified + if argsLen == -1 { + argsLen = len(args) + } + if argsLen != 1 { + return "", cmdutil.UsageErrorf(cmd, "exactly one NAME is required, got %d", argsLen) + } + return args[0], nil +} + +// CreateSubcommandOptions is an options struct to support create subcommands +type CreateSubcommandOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + // 
Name of resource being created + Name string + // StructuredGenerator is the resource generator for the object being created + StructuredGenerator generate.StructuredGenerator + DryRunStrategy cmdutil.DryRunStrategy + CreateAnnotation bool + FieldManager string + ValidationDirective string + + Namespace string + EnforceNamespace bool + + Mapper meta.RESTMapper + DynamicClient dynamic.Interface + + PrintObj printers.ResourcePrinterFunc + + genericiooptions.IOStreams +} + +// NewCreateSubcommandOptions returns initialized CreateSubcommandOptions +func NewCreateSubcommandOptions(ioStreams genericiooptions.IOStreams) *CreateSubcommandOptions { + return &CreateSubcommandOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// Complete completes all the required options +func (o *CreateSubcommandOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string, generator generate.StructuredGenerator) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + o.Name = name + o.StructuredGenerator = generator + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + o.PrintObj = func(obj kruntime.Object, out io.Writer) error { + return printer.PrintObj(obj, out) + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.DynamicClient, err = f.DynamicClient() + if err != nil { + return err + } + + o.Mapper, err = f.ToRESTMapper() + if err != nil { + return err + } + + return nil +} + +// Run executes a create subcommand using the specified options +func (o *CreateSubcommandOptions) Run() error { + obj, err := o.StructuredGenerator.StructuredGenerate() + if err != nil { + return err + } + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, obj, scheme.DefaultJSONEncoder()); err != nil { + return err + } + if o.DryRunStrategy != cmdutil.DryRunClient { + // create subcommands have compiled knowledge of things they create, so type them directly + gvks, _, err := scheme.Scheme.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + mapping, err := o.Mapper.RESTMapping(schema.GroupKind{Group: gvk.Group, Kind: gvk.Kind}, gvk.Version) + if err != nil { + return err + } + + asUnstructured := &unstructured.Unstructured{} + if err := scheme.Scheme.Convert(obj, asUnstructured, nil); err != nil { + return err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + o.Namespace = "" + } + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + actualObject, err := o.DynamicClient.Resource(mapping.Resource).Namespace(o.Namespace).Create(context.TODO(), asUnstructured, createOptions) + if err != nil { + return err + } + + // ensure we pass a versioned object to the printer + obj = actualObject + } else { + if meta, err := meta.Accessor(obj); err == nil && o.EnforceNamespace { + meta.SetNamespace(o.Namespace) + } + } + + 
return o.PrintObj(obj, o.Out) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go new file mode 100644 index 000000000000..6b55eab3ecd3 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go @@ -0,0 +1,227 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericiooptions" + cliflag "k8s.io/component-base/cli/flag" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + clusterRoleLong = templates.LongDesc(i18n.T(` + Create a cluster role.`)) + + clusterRoleExample = templates.Examples(i18n.T(` + # Create a cluster role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods + kubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods + + # Create a cluster role named "pod-reader" with ResourceName specified + kubectl create clusterrole pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod + + # Create a cluster role named "foo" with API Group specified + kubectl create clusterrole foo --verb=get,list,watch --resource=rs.apps + + # Create a cluster role named "foo" with SubResource specified + kubectl create clusterrole foo --verb=get,list,watch --resource=pods,pods/status + + # Create a cluster role name "foo" with NonResourceURL specified + kubectl create clusterrole "foo" --verb=get --non-resource-url=/logs/* + + # Create a cluster role name "monitoring" with AggregationRule specified + kubectl create clusterrole monitoring --aggregation-rule="rbac.example.com/aggregate-to-monitoring=true"`)) + + // Valid nonResource verb list for validation. 
+ validNonResourceVerbs = []string{"*", "get", "post", "put", "delete", "patch", "head", "options"} +) + +// CreateClusterRoleOptions is returned by NewCmdCreateClusterRole +type CreateClusterRoleOptions struct { + *CreateRoleOptions + NonResourceURLs []string + AggregationRule map[string]string + FieldManager string +} + +// NewCmdCreateClusterRole initializes and returns new ClusterRoles command +func NewCmdCreateClusterRole(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + c := &CreateClusterRoleOptions{ + CreateRoleOptions: NewCreateRoleOptions(ioStreams), + AggregationRule: map[string]string{}, + } + cmd := &cobra.Command{ + Use: "clusterrole NAME --verb=verb --resource=resource.group [--resource-name=resourcename] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a cluster role"), + Long: clusterRoleLong, + Example: clusterRoleExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(c.Complete(f, cmd, args)) + cmdutil.CheckErr(c.Validate()) + cmdutil.CheckErr(c.RunCreateRole()) + }, + } + + c.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringSliceVar(&c.Verbs, "verb", c.Verbs, "Verb that applies to the resources contained in the rule") + cmd.Flags().StringSliceVar(&c.NonResourceURLs, "non-resource-url", c.NonResourceURLs, "A partial url that user should have access to.") + cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to") + cmd.Flags().StringArrayVar(&c.ResourceNames, "resource-name", c.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items") + cmd.Flags().Var(cliflag.NewMapStringString(&c.AggregationRule), "aggregation-rule", "An aggregation label selector for combining ClusterRoles.") + cmdutil.AddFieldManagerFlagVar(cmd, &c.FieldManager, "kubectl-create") + + return cmd +} + +// Complete completes all the required options +func (c *CreateClusterRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + // Remove duplicate nonResourceURLs + nonResourceURLs := []string{} + for _, n := range c.NonResourceURLs { + if !arrayContains(nonResourceURLs, n) { + nonResourceURLs = append(nonResourceURLs, n) + } + } + c.NonResourceURLs = nonResourceURLs + + return c.CreateRoleOptions.Complete(f, cmd, args) +} + +// Validate makes sure there is no discrepency in CreateClusterRoleOptions +func (c *CreateClusterRoleOptions) Validate() error { + if c.Name == "" { + return fmt.Errorf("name must be specified") + } + + if len(c.AggregationRule) > 0 { + if len(c.NonResourceURLs) > 0 || len(c.Verbs) > 0 || len(c.Resources) > 0 || len(c.ResourceNames) > 0 { + return fmt.Errorf("aggregation rule must be specified without nonResourceURLs, verbs, resources or resourceNames") + } + return nil + } + + // validate verbs. 
+ if len(c.Verbs) == 0 { + return fmt.Errorf("at least one verb must be specified") + } + + if len(c.Resources) == 0 && len(c.NonResourceURLs) == 0 { + return fmt.Errorf("one of resource or nonResourceURL must be specified") + } + + // validate resources + if len(c.Resources) > 0 { + for _, v := range c.Verbs { + if !arrayContains(validResourceVerbs, v) { + fmt.Fprintf(c.ErrOut, "Warning: '%s' is not a standard resource verb\n", v) + } + } + if err := c.validateResource(); err != nil { + return err + } + } + + //validate non-resource-url + if len(c.NonResourceURLs) > 0 { + for _, v := range c.Verbs { + if !arrayContains(validNonResourceVerbs, v) { + return fmt.Errorf("invalid verb: '%s' for nonResourceURL", v) + } + } + + for _, nonResourceURL := range c.NonResourceURLs { + if nonResourceURL == "*" { + continue + } + + if nonResourceURL == "" || !strings.HasPrefix(nonResourceURL, "/") { + return fmt.Errorf("nonResourceURL should start with /") + } + + if strings.ContainsRune(nonResourceURL[:len(nonResourceURL)-1], '*') { + return fmt.Errorf("nonResourceURL only supports wildcard matches when '*' is at the end") + } + } + } + + return nil + +} + +// RunCreateRole creates a new clusterRole +func (c *CreateClusterRoleOptions) RunCreateRole() error { + clusterRole := &rbacv1.ClusterRole{ + // this is ok because we know exactly how we want to be serialized + TypeMeta: metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "ClusterRole"}, + } + clusterRole.Name = c.Name + + var err error + if len(c.AggregationRule) == 0 { + rules, err := generateResourcePolicyRules(c.Mapper, c.Verbs, c.Resources, c.ResourceNames, c.NonResourceURLs) + if err != nil { + return err + } + clusterRole.Rules = rules + } else { + clusterRole.AggregationRule = &rbacv1.AggregationRule{ + ClusterRoleSelectors: []metav1.LabelSelector{ + { + MatchLabels: c.AggregationRule, + }, + }, + } + } + + if err := util.CreateOrUpdateAnnotation(c.CreateAnnotation, clusterRole, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + // Create ClusterRole. + if c.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if c.FieldManager != "" { + createOptions.FieldManager = c.FieldManager + } + createOptions.FieldValidation = c.ValidationDirective + if c.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + clusterRole, err = c.Client.ClusterRoles().Create(context.TODO(), clusterRole, createOptions) + if err != nil { + return err + } + } + + return c.PrintObj(clusterRole) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go new file mode 100644 index 000000000000..3b87019ac721 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go @@ -0,0 +1,228 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + clusterRoleBindingLong = templates.LongDesc(i18n.T(` + Create a cluster role binding for a particular cluster role.`)) + + clusterRoleBindingExample = templates.Examples(i18n.T(` + # Create a cluster role binding for user1, user2, and group1 using the cluster-admin cluster role + kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-admin --user=user1 --user=user2 --group=group1`)) +) + +// ClusterRoleBindingOptions is returned by NewCmdCreateClusterRoleBinding +type ClusterRoleBindingOptions struct { + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + Name string + ClusterRole string + Users []string + Groups []string + ServiceAccounts []string + FieldManager string + CreateAnnotation bool + + Client rbacclientv1.RbacV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewClusterRoleBindingOptions creates a new *ClusterRoleBindingOptions with sane defaults +func NewClusterRoleBindingOptions(ioStreams genericiooptions.IOStreams) *ClusterRoleBindingOptions { + return &ClusterRoleBindingOptions{ + Users: []string{}, + Groups: []string{}, + ServiceAccounts: []string{}, + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateClusterRoleBinding returns an initialized command instance of ClusterRoleBinding +func NewCmdCreateClusterRoleBinding(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewClusterRoleBindingOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "clusterrolebinding NAME --clusterrole=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a cluster role binding for a particular cluster role"), + Long: clusterRoleBindingLong, + Example: clusterRoleBindingExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringVar(&o.ClusterRole, "clusterrole", "", i18n.T("ClusterRole this ClusterRoleBinding should reference")) + cmd.MarkFlagRequired("clusterrole") + cmd.Flags().StringArrayVar(&o.Users, "user", o.Users, "Usernames to bind to the clusterrole. The flag can be repeated to add multiple users.") + cmd.Flags().StringArrayVar(&o.Groups, "group", o.Groups, "Groups to bind to the clusterrole. The flag can be repeated to add multiple groups.") + cmd.Flags().StringArrayVar(&o.ServiceAccounts, "serviceaccount", o.ServiceAccounts, "Service accounts to bind to the clusterrole, in the format <namespace>:<serviceaccountname>.
The flag can be repeated to add multiple service accounts.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + // Completion for relevant flags + cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc( + "clusterrole", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return completion.CompGetResource(f, "clusterrole", toComplete), cobra.ShellCompDirectiveNoFileComp + })) + + return cmd +} + +// Complete completes all the required options +func (o *ClusterRoleBindingOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + cs, err := f.KubernetesClientSet() + if err != nil { + return err + } + o.Client = cs.RbacV1() + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Run calls the CreateSubcommandOptions.Run in ClusterRoleBindingOptions instance +func (o *ClusterRoleBindingOptions) Run() error { + clusterRoleBinding, err := o.createClusterRoleBinding() + if err != nil { + return err + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, clusterRoleBinding, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + clusterRoleBinding, err = o.Client.ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, createOptions) + if err != nil { + return fmt.Errorf("failed to create clusterrolebinding: %v", err) + } + } + + return o.PrintObj(clusterRoleBinding) +} + +func (o *ClusterRoleBindingOptions) createClusterRoleBinding() (*rbacv1.ClusterRoleBinding, error) { + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + TypeMeta: metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "ClusterRoleBinding"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: o.ClusterRole, + }, + } + + for _, user := range o.Users { + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.UserKind, + APIGroup: rbacv1.GroupName, + Name: user, + }) + } + + for _, group := range o.Groups { + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.GroupKind, + APIGroup: rbacv1.GroupName, + Name: group, + }) + } + + for _, sa := range o.ServiceAccounts { + tokens := strings.Split(sa, ":") + if len(tokens) != 2 || tokens[0] == "" || tokens[1] == "" { + return nil, fmt.Errorf("serviceaccount must be :") + } + clusterRoleBinding.Subjects = append(clusterRoleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.ServiceAccountKind, + APIGroup: "", + Namespace: tokens[0], + Name: tokens[1], + }) + } + + return 
clusterRoleBinding, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_configmap.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_configmap.go new file mode 100644 index 000000000000..988cbb1efc6e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_configmap.go @@ -0,0 +1,414 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "os" + "path" + "strings" + "unicode/utf8" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/hash" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + configMapLong = templates.LongDesc(i18n.T(` + Create a config map based on a file, directory, or specified literal value. + + A single config map may package one or more key/value pairs. + + When creating a config map based on a file, the key will default to the basename of the file, and the value will + default to the file content. If the basename is an invalid key, you may specify an alternate key. + + When creating a config map based on a directory, each file whose basename is a valid key in the directory will be + packaged into the config map. Any directory entries except regular files are ignored (e.g. 
subdirectories, + symlinks, devices, pipes, etc).`)) + + configMapExample = templates.Examples(i18n.T(` + # Create a new config map named my-config based on folder bar + kubectl create configmap my-config --from-file=path/to/bar + + # Create a new config map named my-config with specified keys instead of file basenames on disk + kubectl create configmap my-config --from-file=key1=/path/to/bar/file1.txt --from-file=key2=/path/to/bar/file2.txt + + # Create a new config map named my-config with key1=config1 and key2=config2 + kubectl create configmap my-config --from-literal=key1=config1 --from-literal=key2=config2 + + # Create a new config map named my-config from the key=value pairs in the file + kubectl create configmap my-config --from-file=path/to/bar + + # Create a new config map named my-config from an env file + kubectl create configmap my-config --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env`)) +) + +// ConfigMapOptions holds properties for create configmap sub-command +type ConfigMapOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + // Name of configMap (required) + Name string + // Type of configMap (optional) + Type string + // FileSources to derive the configMap from (optional) + FileSources []string + // LiteralSources to derive the configMap from (optional) + LiteralSources []string + // EnvFileSources to derive the configMap from (optional) + EnvFileSources []string + // AppendHash; if true, derive a hash from the ConfigMap and append it to the name + AppendHash bool + + FieldManager string + CreateAnnotation bool + Namespace string + EnforceNamespace bool + + Client corev1client.CoreV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewConfigMapOptions creates a new *ConfigMapOptions with default value +func NewConfigMapOptions(ioStreams genericiooptions.IOStreams) *ConfigMapOptions { + return &ConfigMapOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateConfigMap creates the `create configmap` Cobra command +func NewCmdCreateConfigMap(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewConfigMapOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "configmap NAME [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Aliases: []string{"cm"}, + Short: i18n.T("Create a config map from a local file, directory or literal value"), + Long: configMapLong, + Example: configMapExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + + cmd.Flags().StringSliceVar(&o.FileSources, "from-file", o.FileSources, "Key file can be specified using its file path, in which case file basename will be used as configmap key, or optionally with a key and file path, in which case the given key will be used. Specifying a directory will iterate each named file in the directory whose basename is a valid configmap key.") + cmd.Flags().StringArrayVar(&o.LiteralSources, "from-literal", o.LiteralSources, "Specify a key and literal value to insert in configmap (i.e. 
mykey=somevalue)") + cmd.Flags().StringSliceVar(&o.EnvFileSources, "from-env-file", o.EnvFileSources, "Specify the path to a file to read lines of key=val pairs to create a configmap.") + cmd.Flags().BoolVar(&o.AppendHash, "append-hash", o.AppendHash, "Append a hash of the configmap to its name.") + + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete loads data from the command line environment +func (o *ConfigMapOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + + o.Client, err = corev1client.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate checks if ConfigMapOptions has sufficient value to run +func (o *ConfigMapOptions) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if len(o.EnvFileSources) > 0 && (len(o.FileSources) > 0 || len(o.LiteralSources) > 0) { + return fmt.Errorf("from-env-file cannot be combined with from-file or from-literal") + } + return nil +} + +// Run calls createConfigMap and filled in value for configMap object +func (o *ConfigMapOptions) Run() error { + configMap, err := o.createConfigMap() + if err != nil { + return err + } + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, configMap, scheme.DefaultJSONEncoder()); err != nil { + return err + } + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + configMap, err = o.Client.ConfigMaps(o.Namespace).Create(context.TODO(), configMap, createOptions) + if err != nil { + return fmt.Errorf("failed to create configmap: %v", err) + } + } + + return o.PrintObj(configMap) +} + +// createConfigMap fills in key value pair from the information given in +// ConfigMapOptions into *corev1.ConfigMap +func (o *ConfigMapOptions) createConfigMap() (*corev1.ConfigMap, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + configMap := &corev1.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Namespace: namespace, + }, + } + configMap.Name = o.Name + configMap.Data = map[string]string{} + configMap.BinaryData = map[string][]byte{} + + if len(o.FileSources) > 0 { + if err := handleConfigMapFromFileSources(configMap, o.FileSources); err != nil { + return nil, err + } + } + if len(o.LiteralSources) > 0 { + if err := 
handleConfigMapFromLiteralSources(configMap, o.LiteralSources); err != nil { + return nil, err + } + } + if len(o.EnvFileSources) > 0 { + if err := handleConfigMapFromEnvFileSources(configMap, o.EnvFileSources); err != nil { + return nil, err + } + } + if o.AppendHash { + hash, err := hash.ConfigMapHash(configMap) + if err != nil { + return nil, err + } + configMap.Name = fmt.Sprintf("%s-%s", configMap.Name, hash) + } + + return configMap, nil +} + +// handleConfigMapFromLiteralSources adds the specified literal source +// information into the provided configMap. +func handleConfigMapFromLiteralSources(configMap *corev1.ConfigMap, literalSources []string) error { + for _, literalSource := range literalSources { + keyName, value, err := util.ParseLiteralSource(literalSource) + if err != nil { + return err + } + err = addKeyFromLiteralToConfigMap(configMap, keyName, value) + if err != nil { + return err + } + } + + return nil +} + +// handleConfigMapFromFileSources adds the specified file source information +// into the provided configMap +func handleConfigMapFromFileSources(configMap *corev1.ConfigMap, fileSources []string) error { + for _, fileSource := range fileSources { + keyName, filePath, err := util.ParseFileSource(fileSource) + if err != nil { + return err + } + info, err := os.Stat(filePath) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", filePath, err.Err) + default: + return fmt.Errorf("error reading %s: %v", filePath, err) + } + + } + if info.IsDir() { + if strings.Contains(fileSource, "=") { + return fmt.Errorf("cannot give a key name for a directory path") + } + fileList, err := os.ReadDir(filePath) + if err != nil { + return fmt.Errorf("error listing files in %s: %v", filePath, err) + } + for _, item := range fileList { + itemPath := path.Join(filePath, item.Name()) + if item.Type().IsRegular() { + keyName = item.Name() + err = addKeyFromFileToConfigMap(configMap, keyName, itemPath) + if err != nil { + return err + } + } + } + } else { + if err := addKeyFromFileToConfigMap(configMap, keyName, filePath); err != nil { + return err + } + + } + } + return nil +} + +// handleConfigMapFromEnvFileSources adds the specified env file source information +// into the provided configMap +func handleConfigMapFromEnvFileSources(configMap *corev1.ConfigMap, envFileSources []string) error { + for _, envFileSource := range envFileSources { + info, err := os.Stat(envFileSource) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", envFileSource, err.Err) + default: + return fmt.Errorf("error reading %s: %v", envFileSource, err) + } + } + if info.IsDir() { + return fmt.Errorf("env config file cannot be a directory") + } + err = cmdutil.AddFromEnvFile(envFileSource, func(key, value string) error { + return addKeyFromLiteralToConfigMap(configMap, key, value) + }) + if err != nil { + return err + } + } + + return nil +} + +// addKeyFromFileToConfigMap adds a key with the given name to a ConfigMap, populating +// the value with the content of the given file path, or returns an error. 
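+// For illustration (paths and key names below are hypothetical): content that
+// is valid UTF-8 is stored as a string under Data, while anything else falls
+// through to BinaryData:
+//
+//	cm := &corev1.ConfigMap{Data: map[string]string{}, BinaryData: map[string][]byte{}}
+//	_ = addKeyFromFileToConfigMap(cm, "app.conf", "/tmp/app.conf") // text file -> cm.Data["app.conf"]
+//	_ = addKeyFromFileToConfigMap(cm, "logo.png", "/tmp/logo.png") // binary file -> cm.BinaryData["logo.png"]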
+func addKeyFromFileToConfigMap(configMap *corev1.ConfigMap, keyName, filePath string) error { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + if utf8.Valid(data) { + return addKeyFromLiteralToConfigMap(configMap, keyName, string(data)) + } + err = validateNewConfigMap(configMap, keyName) + if err != nil { + return err + } + configMap.BinaryData[keyName] = data + + return nil +} + +// addKeyFromLiteralToConfigMap adds the given key and data to the given config map, +// returning an error if the key is not valid or if the key already exists. +func addKeyFromLiteralToConfigMap(configMap *corev1.ConfigMap, keyName, data string) error { + err := validateNewConfigMap(configMap, keyName) + if err != nil { + return err + } + configMap.Data[keyName] = data + + return nil +} + +// validateNewConfigMap checks whether the keyname is valid +// Note, the rules for ConfigMap keys are the exact same as the ones for SecretKeys. +func validateNewConfigMap(configMap *corev1.ConfigMap, keyName string) error { + if errs := validation.IsConfigMapKey(keyName); len(errs) > 0 { + return fmt.Errorf("%q is not a valid key name for a ConfigMap: %s", keyName, strings.Join(errs, ",")) + } + if _, exists := configMap.Data[keyName]; exists { + return fmt.Errorf("cannot add key %q, another key by that name already exists in Data for ConfigMap %q", keyName, configMap.Name) + } + if _, exists := configMap.BinaryData[keyName]; exists { + return fmt.Errorf("cannot add key %q, another key by that name already exists in BinaryData for ConfigMap %q", keyName, configMap.Name) + } + + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go new file mode 100644 index 000000000000..5c0821d9da6b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go @@ -0,0 +1,226 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + cronjobLong = templates.LongDesc(i18n.T(` + Create a cron job with the specified name.`)) + + cronjobExample = templates.Examples(` + # Create a cron job + kubectl create cronjob my-job --image=busybox --schedule="*/1 * * * *" + + # Create a cron job with a command + kubectl create cronjob my-job --image=busybox --schedule="*/1 * * * *" -- date`) +) + +// CreateCronJobOptions is returned by NewCreateCronJobOptions +type CreateCronJobOptions struct { + PrintFlags *genericclioptions.PrintFlags + + PrintObj func(obj runtime.Object) error + + Name string + Image string + Schedule string + Command []string + Restart string + + Namespace string + EnforceNamespace bool + Client batchv1client.BatchV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + Builder *resource.Builder + FieldManager string + CreateAnnotation bool + + genericiooptions.IOStreams +} + +// NewCreateCronJobOptions returns an initialized CreateCronJobOptions instance +func NewCreateCronJobOptions(ioStreams genericiooptions.IOStreams) *CreateCronJobOptions { + return &CreateCronJobOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateCronJob is a command to create CronJobs. +func NewCmdCreateCronJob(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewCreateCronJobOptions(ioStreams) + cmd := &cobra.Command{ + Use: "cronjob NAME --image=image --schedule='0/5 * * * ?' -- [COMMAND] [args...]", + DisableFlagsInUseLine: false, + Aliases: []string{"cj"}, + Short: i18n.T("Create a cron job with the specified name"), + Long: cronjobLong, + Example: cronjobExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") + cmd.MarkFlagRequired("image") + cmd.Flags().StringVar(&o.Schedule, "schedule", o.Schedule, "A schedule in the Cron format the job should be run with.") + cmd.MarkFlagRequired("schedule") + cmd.Flags().StringVar(&o.Restart, "restart", o.Restart, "job's restart policy. 
supported values: OnFailure, Never") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete completes all the required options +func (o *CreateCronJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + o.Name = name + if len(args) > 1 { + o.Command = args[1:] + } + if len(o.Restart) == 0 { + o.Restart = "OnFailure" + } + + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = batchv1client.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + o.Builder = f.NewBuilder() + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Run performs the execution of 'create cronjob' sub command +func (o *CreateCronJobOptions) Run() error { + cronJob := o.createCronJob() + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, cronJob, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + cronJob, err = o.Client.CronJobs(o.Namespace).Create(context.TODO(), cronJob, createOptions) + if err != nil { + return fmt.Errorf("failed to create cronjob: %v", err) + } + } + + return o.PrintObj(cronJob) +} + +func (o *CreateCronJobOptions) createCronJob() *batchv1.CronJob { + cronjob := &batchv1.CronJob{ + TypeMeta: metav1.TypeMeta{APIVersion: batchv1.SchemeGroupVersion.String(), Kind: "CronJob"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + }, + Spec: batchv1.CronJobSpec{ + Schedule: o.Schedule, + JobTemplate: batchv1.JobTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: o.Name, + Image: o.Image, + Command: o.Command, + }, + }, + RestartPolicy: corev1.RestartPolicy(o.Restart), + }, + }, + }, + }, + }, + } + if o.EnforceNamespace { + cronjob.Namespace = o.Namespace + } + return cronjob +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_deployment.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_deployment.go new file mode 100644 index 000000000000..519c7fb6223a --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_deployment.go @@ -0,0 +1,278 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + appsv1client "k8s.io/client-go/kubernetes/typed/apps/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + deploymentLong = templates.LongDesc(i18n.T(` + Create a deployment with the specified name.`)) + + deploymentExample = templates.Examples(i18n.T(` + # Create a deployment named my-dep that runs the busybox image + kubectl create deployment my-dep --image=busybox + + # Create a deployment with a command + kubectl create deployment my-dep --image=busybox -- date + + # Create a deployment named my-dep that runs the nginx image with 3 replicas + kubectl create deployment my-dep --image=nginx --replicas=3 + + # Create a deployment named my-dep that runs the busybox image and expose port 5701 + kubectl create deployment my-dep --image=busybox --port=5701 + + # Create a deployment named my-dep that runs multiple containers + kubectl create deployment my-dep --image=busybox:latest --image=ubuntu:latest --image=nginx`)) +) + +// CreateDeploymentOptions is returned by NewCmdCreateDeployment +type CreateDeploymentOptions struct { + PrintFlags *genericclioptions.PrintFlags + + PrintObj func(obj runtime.Object) error + + Name string + Images []string + Port int32 + Replicas int32 + Command []string + Namespace string + EnforceNamespace bool + FieldManager string + CreateAnnotation bool + + Client appsv1client.AppsV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewCreateDeploymentOptions returns an initialized CreateDeploymentOptions instance +func NewCreateDeploymentOptions(ioStreams genericiooptions.IOStreams) *CreateDeploymentOptions { + return &CreateDeploymentOptions{ + Port: -1, + Replicas: 1, + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateDeployment is a macro command to create a new deployment. +// This command is better known to users as `kubectl create deployment`. 
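+// As a rough sketch of the flag-to-object mapping (invocation is hypothetical):
+//
+//	kubectl create deployment web --image=nginx --replicas=3 --port=80
+//
+// would yield a Deployment with .spec.replicas=3, the label and selector app=web,
+// and a single container named "nginx" exposing containerPort 80.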
+func NewCmdCreateDeployment(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewCreateDeploymentOptions(ioStreams) + cmd := &cobra.Command{ + Use: "deployment NAME --image=image -- [COMMAND] [args...]", + DisableFlagsInUseLine: true, + Aliases: []string{"deploy"}, + Short: i18n.T("Create a deployment with the specified name"), + Long: deploymentLong, + Example: deploymentExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringSliceVar(&o.Images, "image", o.Images, "Image names to run. A deployment can have multiple images set for multi-container pod.") + cmd.MarkFlagRequired("image") + cmd.Flags().Int32Var(&o.Port, "port", o.Port, "The containerPort that this deployment exposes.") + cmd.Flags().Int32VarP(&o.Replicas, "replicas", "r", o.Replicas, "Number of replicas to create. Default is 1.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete completes all the options +func (o *CreateDeploymentOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + o.Name = name + if len(args) > 1 { + o.Command = args[1:] + } + + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = appsv1client.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate makes sure there is no discrepency in provided option values +func (o *CreateDeploymentOptions) Validate() error { + if len(o.Images) > 1 && len(o.Command) > 0 { + return fmt.Errorf("cannot specify multiple --image options and command") + } + return nil +} + +// Run performs the execution of 'create deployment' sub command +func (o *CreateDeploymentOptions) Run() error { + deploy := o.createDeployment() + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, deploy, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + deploy, err = o.Client.Deployments(o.Namespace).Create(context.TODO(), deploy, createOptions) + if err != nil { + return fmt.Errorf("failed to create deployment: %v", err) + } + } + + return o.PrintObj(deploy) +} + +func (o *CreateDeploymentOptions) createDeployment() *appsv1.Deployment { + labels := map[string]string{"app": o.Name} + 
selector := metav1.LabelSelector{MatchLabels: labels} + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + deploy := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{APIVersion: appsv1.SchemeGroupVersion.String(), Kind: "Deployment"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Labels: labels, + Namespace: namespace, + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &o.Replicas, + Selector: &selector, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: o.buildPodSpec(), + }, + }, + } + + if o.Port >= 0 && len(deploy.Spec.Template.Spec.Containers) > 0 { + deploy.Spec.Template.Spec.Containers[0].Ports = []corev1.ContainerPort{{ContainerPort: o.Port}} + } + return deploy +} + +// buildPodSpec parses the image strings and assemble them into the Containers +// of a PodSpec. This is all you need to create the PodSpec for a deployment. +func (o *CreateDeploymentOptions) buildPodSpec() corev1.PodSpec { + podSpec := corev1.PodSpec{Containers: []corev1.Container{}} + for _, imageString := range o.Images { + // Retain just the image name + imageSplit := strings.Split(imageString, "/") + name := imageSplit[len(imageSplit)-1] + // Remove any tag or hash + if strings.Contains(name, ":") { + name = strings.Split(name, ":")[0] + } + if strings.Contains(name, "@") { + name = strings.Split(name, "@")[0] + } + name = sanitizeAndUniquify(name) + podSpec.Containers = append(podSpec.Containers, corev1.Container{ + Name: name, + Image: imageString, + Command: o.Command, + }) + } + return podSpec +} + +// sanitizeAndUniquify replaces characters like "." or "_" into "-" to follow DNS1123 rules. +// Then add random suffix to make it uniquified. +func sanitizeAndUniquify(name string) string { + if strings.ContainsAny(name, "_.") { + name = strings.Replace(name, "_", "-", -1) + name = strings.Replace(name, ".", "-", -1) + name = fmt.Sprintf("%s-%s", name, utilrand.String(5)) + } + return name +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_ingress.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_ingress.go new file mode 100644 index 000000000000..d71048cdcb9f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_ingress.go @@ -0,0 +1,458 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package create
+
+import (
+ "context"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ networkingv1 "k8s.io/api/networking/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/intstr"
+ "k8s.io/cli-runtime/pkg/genericclioptions"
+ "k8s.io/cli-runtime/pkg/genericiooptions"
+ networkingv1client "k8s.io/client-go/kubernetes/typed/networking/v1"
+ cmdutil "k8s.io/kubectl/pkg/cmd/util"
+ "k8s.io/kubectl/pkg/scheme"
+ "k8s.io/kubectl/pkg/util"
+ "k8s.io/kubectl/pkg/util/i18n"
+ "k8s.io/kubectl/pkg/util/templates"
+)
+
+var (
+ // Explaining the Regex below:
+ // ^(?P<host>[\w\*\-\.]*) -> Indicates the host - 0-N characters of letters, number, underscore, '-', '.' and '*'
+ // (?P<path>/.*) -> Indicates the path and MUST start with '/' - / + 0-N characters
+ // Separator from host/path to svcname:svcport -> "="
+ // (?P<svcname>[\w\-]+) -> Service Name (letters, numbers, '-') -> 1-N characters
+ // Separator from svcname to svcport -> ":"
+ // (?P<svcport>[\w\-]+) -> Service Port (letters, numbers, '-') -> 1-N characters
+ regexHostPathSvc = `^(?P<host>[\w\*\-\.]*)(?P<path>/.*)=(?P<svcname>[\w\-]+):(?P<svcport>[\w\-]+)`
+
+ // This Regex is optional -> (....)?
+ // (?P<istls>tls) -> Verify if the argument after "," is 'tls'
+ // Optional Separator from tls to the secret name -> "=?"
+ // (?P<secretname>[\w\-]+)? -> Optional secret name after the separator -> 1-N characters
+ regexTLS = `(,(?P<istls>tls)=?(?P<secretname>[\w\-]+)?)?`
+
+ // The validation Regex is the concatenation of hostPathSvc validation regex
+ // and the TLS validation regex
+ ruleRegex = regexHostPathSvc + regexTLS
+
+ ingressLong = templates.LongDesc(i18n.T(`
+ Create an ingress with the specified name.`))
+
+ ingressExample = templates.Examples(i18n.T(`
+ # Create a single ingress called 'simple' that directs requests to foo.com/bar to svc
+ # svc1:8080 with a TLS secret "my-cert"
+ kubectl create ingress simple --rule="foo.com/bar=svc1:8080,tls=my-cert"
+
+ # Create a catch all ingress of "/path" pointing to service svc:port and Ingress Class as "otheringress"
+ kubectl create ingress catch-all --class=otheringress --rule="/path=svc:port"
+
+ # Create an ingress with two annotations: ingress.annotation1 and ingress.annotations2
+ kubectl create ingress annotated --class=default --rule="foo.com/bar=svc:port" \
+ --annotation ingress.annotation1=foo \
+ --annotation ingress.annotation2=bla
+
+ # Create an ingress with the same host and multiple paths
+ kubectl create ingress multipath --class=default \
+ --rule="foo.com/=svc:port" \
+ --rule="foo.com/admin/=svcadmin:portadmin"
+
+ # Create an ingress with multiple hosts and the pathType as Prefix
+ kubectl create ingress ingress1 --class=default \
+ --rule="foo.com/path*=svc:8080" \
+ --rule="bar.com/admin*=svc2:http"
+
+ # Create an ingress with TLS enabled using the default ingress certificate and different path types
+ kubectl create ingress ingtls --class=default \
+ --rule="foo.com/=svc:https,tls" \
+ --rule="foo.com/path/subpath*=othersvc:8080"
+
+ # Create an ingress with TLS enabled using a specific secret and pathType as Prefix
+ kubectl create ingress ingsecret --class=default \
+ --rule="foo.com/*=svc:8080,tls=secret1"
+
+ # Create an ingress with a default backend
+ kubectl create ingress ingdefault --class=default \
+ --default-backend=defaultsvc:http \
+ --rule="foo.com/*=svc:8080,tls=secret1"
+
+ `))
+)
+
+// CreateIngressOptions is returned by NewCmdCreateIngress
+type CreateIngressOptions struct {
+ PrintFlags *genericclioptions.PrintFlags
+
+ PrintObj func(obj
runtime.Object) error + + Name string + IngressClass string + Rules []string + Annotations []string + DefaultBackend string + Namespace string + EnforceNamespace bool + CreateAnnotation bool + + Client networkingv1client.NetworkingV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + FieldManager string + + genericiooptions.IOStreams +} + +// NewCreateIngressOptions creates the CreateIngressOptions to be used later +func NewCreateIngressOptions(ioStreams genericiooptions.IOStreams) *CreateIngressOptions { + return &CreateIngressOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateIngress is a macro command to create a new ingress. +// This command is better known to users as `kubectl create ingress`. +func NewCmdCreateIngress(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewCreateIngressOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "ingress NAME --rule=host/path=service:port[,tls[=secret]] ", + DisableFlagsInUseLine: true, + Aliases: []string{"ing"}, + Short: i18n.T("Create an ingress with the specified name"), + Long: ingressLong, + Example: ingressExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringVar(&o.IngressClass, "class", o.IngressClass, "Ingress Class to be used") + cmd.Flags().StringArrayVar(&o.Rules, "rule", o.Rules, "Rule in format host/path=service:port[,tls=secretname]. Paths containing the leading character '*' are considered pathType=Prefix. 
tls argument is optional.") + cmd.Flags().StringVar(&o.DefaultBackend, "default-backend", o.DefaultBackend, "Default service for backend, in format of svcname:port") + cmd.Flags().StringArrayVar(&o.Annotations, "annotation", o.Annotations, "Annotation to insert in the ingress object, in the format annotation=value") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete completes all the options +func (o *CreateIngressOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + o.Name = name + + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = networkingv1client.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + return err +} + +// Validate validates the Ingress object to be created +func (o *CreateIngressOptions) Validate() error { + if len(o.DefaultBackend) == 0 && len(o.Rules) == 0 { + return fmt.Errorf("not enough information provided: every ingress has to either specify a default-backend (which catches all traffic) or a list of rules (which catch specific paths)") + } + + rulevalidation, err := regexp.Compile(ruleRegex) + if err != nil { + return fmt.Errorf("failed to compile the regex") + } + + for _, rule := range o.Rules { + if match := rulevalidation.MatchString(rule); !match { + return fmt.Errorf("rule %s is invalid and should be in format host/path=svcname:svcport[,tls[=secret]]", rule) + } + } + + for _, annotation := range o.Annotations { + if an := strings.SplitN(annotation, "=", 2); len(an) != 2 { + return fmt.Errorf("annotation %s is invalid and should be in format key=[value]", annotation) + } + } + + if len(o.DefaultBackend) > 0 && len(strings.Split(o.DefaultBackend, ":")) != 2 { + return fmt.Errorf("default-backend should be in format servicename:serviceport") + } + + return nil +} + +// Run performs the execution of 'create ingress' sub command +func (o *CreateIngressOptions) Run() error { + ingress := o.createIngress() + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, ingress, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + ingress, err = o.Client.Ingresses(o.Namespace).Create(context.TODO(), ingress, createOptions) + if err != nil { + return fmt.Errorf("failed to create ingress: %v", err) + } + } + return o.PrintObj(ingress) +} + +func (o *CreateIngressOptions) createIngress() *networkingv1.Ingress { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + annotations := 
o.buildAnnotations() + spec := o.buildIngressSpec() + + ingress := &networkingv1.Ingress{ + TypeMeta: metav1.TypeMeta{APIVersion: networkingv1.SchemeGroupVersion.String(), Kind: "Ingress"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: spec, + } + return ingress +} + +func (o *CreateIngressOptions) buildAnnotations() map[string]string { + + var annotations = make(map[string]string) + + for _, annotation := range o.Annotations { + an := strings.SplitN(annotation, "=", 2) + annotations[an[0]] = an[1] + } + return annotations +} + +// buildIngressSpec builds the .spec from the diverse arguments passed to kubectl +func (o *CreateIngressOptions) buildIngressSpec() networkingv1.IngressSpec { + var ingressSpec networkingv1.IngressSpec + + if len(o.IngressClass) > 0 { + ingressSpec.IngressClassName = &o.IngressClass + } + + if len(o.DefaultBackend) > 0 { + defaultbackend := buildIngressBackendSvc(o.DefaultBackend) + ingressSpec.DefaultBackend = &defaultbackend + } + ingressSpec.TLS = o.buildTLSRules() + ingressSpec.Rules = o.buildIngressRules() + + return ingressSpec +} + +func (o *CreateIngressOptions) buildTLSRules() []networkingv1.IngressTLS { + hostAlreadyPresent := make(map[string]struct{}) + + ingressTLSs := []networkingv1.IngressTLS{} + var secret string + + for _, rule := range o.Rules { + tls := strings.Split(rule, ",") + + if len(tls) == 2 { + ingressTLS := networkingv1.IngressTLS{} + host := strings.SplitN(rule, "/", 2)[0] + secret = "" + secretName := strings.Split(tls[1], "=") + + if len(secretName) > 1 { + secret = secretName[1] + } + + idxSecret := getIndexSecret(secret, ingressTLSs) + // We accept the same host into TLS secrets only once + if _, ok := hostAlreadyPresent[host]; !ok { + if idxSecret > -1 { + ingressTLSs[idxSecret].Hosts = append(ingressTLSs[idxSecret].Hosts, host) + hostAlreadyPresent[host] = struct{}{} + continue + } + if host != "" { + ingressTLS.Hosts = append(ingressTLS.Hosts, host) + } + if secret != "" { + ingressTLS.SecretName = secret + } + if len(ingressTLS.SecretName) > 0 || len(ingressTLS.Hosts) > 0 { + ingressTLSs = append(ingressTLSs, ingressTLS) + } + hostAlreadyPresent[host] = struct{}{} + } + } + } + return ingressTLSs +} + +// buildIngressRules builds the .spec.rules for an ingress object. 
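+// For example, a hypothetical rule "foo.com/admin*=svcadmin:8080,tls=secret1"
+// becomes a rule for host "foo.com" with path "/admin", pathType Prefix, and a
+// backend of service "svcadmin" on port 8080; the trailing ",tls=secret1" part
+// is stripped here and handled by buildTLSRules instead.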
+func (o *CreateIngressOptions) buildIngressRules() []networkingv1.IngressRule { + ingressRules := []networkingv1.IngressRule{} + + for _, rule := range o.Rules { + removeTLS := strings.Split(rule, ",")[0] + hostSplit := strings.SplitN(removeTLS, "/", 2) + host := hostSplit[0] + ingressPath := buildHTTPIngressPath(hostSplit[1]) + ingressRule := networkingv1.IngressRule{} + + if host != "" { + ingressRule.Host = host + } + + idxHost := getIndexHost(ingressRule.Host, ingressRules) + if idxHost > -1 { + ingressRules[idxHost].IngressRuleValue.HTTP.Paths = append(ingressRules[idxHost].IngressRuleValue.HTTP.Paths, ingressPath) + continue + } + + ingressRule.IngressRuleValue = networkingv1.IngressRuleValue{ + HTTP: &networkingv1.HTTPIngressRuleValue{ + Paths: []networkingv1.HTTPIngressPath{ + ingressPath, + }, + }, + } + ingressRules = append(ingressRules, ingressRule) + } + return ingressRules +} + +func buildHTTPIngressPath(pathsvc string) networkingv1.HTTPIngressPath { + pathsvcsplit := strings.Split(pathsvc, "=") + path := "/" + pathsvcsplit[0] + service := pathsvcsplit[1] + + var pathType networkingv1.PathType + pathType = "Exact" + + // If * in the End, turn pathType=Prefix but remove the * from the end + if path[len(path)-1:] == "*" { + pathType = "Prefix" + path = path[0 : len(path)-1] + } + + httpIngressPath := networkingv1.HTTPIngressPath{ + Path: path, + PathType: &pathType, + Backend: buildIngressBackendSvc(service), + } + return httpIngressPath +} + +func buildIngressBackendSvc(service string) networkingv1.IngressBackend { + svcname := strings.Split(service, ":")[0] + svcport := strings.Split(service, ":")[1] + + ingressBackend := networkingv1.IngressBackend{ + Service: &networkingv1.IngressServiceBackend{ + Name: svcname, + Port: parseServiceBackendPort(svcport), + }, + } + return ingressBackend +} + +func parseServiceBackendPort(port string) networkingv1.ServiceBackendPort { + var backendPort networkingv1.ServiceBackendPort + portIntOrStr := intstr.Parse(port) + + if portIntOrStr.Type == intstr.Int { + backendPort.Number = portIntOrStr.IntVal + } + + if portIntOrStr.Type == intstr.String { + backendPort.Name = portIntOrStr.StrVal + } + return backendPort +} + +func getIndexHost(host string, rules []networkingv1.IngressRule) int { + for index, v := range rules { + if v.Host == host { + return index + } + } + return -1 +} + +func getIndexSecret(secretname string, tls []networkingv1.IngressTLS) int { + for index, v := range tls { + if v.SecretName == secretname { + return index + } + } + return -1 +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_job.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_job.go new file mode 100644 index 000000000000..811720d4f126 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_job.go @@ -0,0 +1,287 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + "k8s.io/utils/ptr" +) + +var ( + jobLong = templates.LongDesc(i18n.T(` + Create a job with the specified name.`)) + + jobExample = templates.Examples(i18n.T(` + # Create a job + kubectl create job my-job --image=busybox + + # Create a job with a command + kubectl create job my-job --image=busybox -- date + + # Create a job from a cron job named "a-cronjob" + kubectl create job test-job --from=cronjob/a-cronjob`)) +) + +// CreateJobOptions is the command line options for 'create job' +type CreateJobOptions struct { + PrintFlags *genericclioptions.PrintFlags + + PrintObj func(obj runtime.Object) error + + Name string + Image string + From string + Command []string + + Namespace string + EnforceNamespace bool + Client batchv1client.BatchV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + Builder *resource.Builder + FieldManager string + CreateAnnotation bool + + genericiooptions.IOStreams +} + +// NewCreateJobOptions initializes and returns new CreateJobOptions instance +func NewCreateJobOptions(ioStreams genericiooptions.IOStreams) *CreateJobOptions { + return &CreateJobOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateJob is a command to ease creating Jobs from CronJobs. 
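+// Two illustrative invocations (names are placeholders): a Job built directly
+// from an image, and one instantiated from an existing CronJob via --from:
+//
+//	kubectl create job backup --image=busybox -- /bin/sh -c 'date'
+//	kubectl create job backup-now --from=cronjob/nightly-backup
+//
+// --image and --from are mutually exclusive; Validate below rejects supplying
+// both or neither.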
+func NewCmdCreateJob(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewCreateJobOptions(ioStreams) + cmd := &cobra.Command{ + Use: "job NAME --image=image [--from=cronjob/name] -- [COMMAND] [args...]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a job with the specified name"), + Long: jobLong, + Example: jobExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().StringVar(&o.Image, "image", o.Image, "Image name to run.") + cmd.Flags().StringVar(&o.From, "from", o.From, "The name of the resource to create a Job from (only cronjob is supported).") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + return cmd +} + +// Complete completes all the required options +func (o *CreateJobOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + o.Name = name + if len(args) > 1 { + o.Command = args[1:] + } + + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = batchv1client.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + o.Builder = f.NewBuilder() + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate makes sure provided values and valid Job options +func (o *CreateJobOptions) Validate() error { + if (len(o.Image) == 0 && len(o.From) == 0) || (len(o.Image) != 0 && len(o.From) != 0) { + return fmt.Errorf("either --image or --from must be specified") + } + if o.Command != nil && len(o.Command) != 0 && len(o.From) != 0 { + return fmt.Errorf("cannot specify --from and command") + } + return nil +} + +// Run performs the execution of 'create job' sub command +func (o *CreateJobOptions) Run() error { + var job *batchv1.Job + if len(o.Image) > 0 { + job = o.createJob() + } else { + infos, err := o.Builder. + WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...). + NamespaceParam(o.Namespace).DefaultNamespace(). + ResourceTypeOrNameArgs(false, o.From). + Flatten(). + Latest(). + Do(). 
+ Infos() + if err != nil { + return err + } + if len(infos) != 1 { + return fmt.Errorf("from must be an existing cronjob") + } + + switch obj := infos[0].Object.(type) { + case *batchv1.CronJob: + job = o.createJobFromCronJob(obj) + default: + return fmt.Errorf("unknown object type %T", obj) + } + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, job, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + job, err = o.Client.Jobs(o.Namespace).Create(context.TODO(), job, createOptions) + if err != nil { + return fmt.Errorf("failed to create job: %v", err) + } + } + + return o.PrintObj(job) +} + +func (o *CreateJobOptions) createJob() *batchv1.Job { + job := &batchv1.Job{ + // this is ok because we know exactly how we want to be serialized + TypeMeta: metav1.TypeMeta{APIVersion: batchv1.SchemeGroupVersion.String(), Kind: "Job"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: o.Name, + Image: o.Image, + Command: o.Command, + }, + }, + RestartPolicy: corev1.RestartPolicyNever, + }, + }, + }, + } + if o.EnforceNamespace { + job.Namespace = o.Namespace + } + return job +} + +func (o *CreateJobOptions) createJobFromCronJob(cronJob *batchv1.CronJob) *batchv1.Job { + annotations := make(map[string]string) + annotations["cronjob.kubernetes.io/instantiate"] = "manual" + for k, v := range cronJob.Spec.JobTemplate.Annotations { + annotations[k] = v + } + + job := &batchv1.Job{ + // this is ok because we know exactly how we want to be serialized + TypeMeta: metav1.TypeMeta{APIVersion: batchv1.SchemeGroupVersion.String(), Kind: "Job"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Annotations: annotations, + Labels: cronJob.Spec.JobTemplate.Labels, + OwnerReferences: []metav1.OwnerReference{ + { + // we are not using metav1.NewControllerRef because it + // sets BlockOwnerDeletion to true which additionally mandates + // cronjobs/finalizer role and not backwards-compatible. + APIVersion: batchv1.SchemeGroupVersion.String(), + Kind: "CronJob", + Name: cronJob.GetName(), + UID: cronJob.GetUID(), + Controller: ptr.To(true), + }, + }, + }, + Spec: cronJob.Spec.JobTemplate.Spec, + } + if o.EnforceNamespace { + job.Namespace = o.Namespace + } + return job +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_namespace.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_namespace.go new file mode 100644 index 000000000000..047e973fa894 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_namespace.go @@ -0,0 +1,179 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/runtime" + coreclient "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + namespaceLong = templates.LongDesc(i18n.T(` + Create a namespace with the specified name.`)) + + namespaceExample = templates.Examples(i18n.T(` + # Create a new namespace named my-namespace + kubectl create namespace my-namespace`)) +) + +// NamespaceOptions is the options for 'create namespace' sub command +type NamespaceOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + // Name of resource being created + Name string + + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + CreateAnnotation bool + FieldManager string + + Client *coreclient.CoreV1Client + + PrintObj func(obj runtime.Object) error + + genericiooptions.IOStreams +} + +// NewNamespaceOptions creates a new *NamespaceOptions with sane defaults +func NewNamespaceOptions(ioStreams genericiooptions.IOStreams) *NamespaceOptions { + return &NamespaceOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateNamespace is a macro command to create a new namespace +func NewCmdCreateNamespace(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + + o := NewNamespaceOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "namespace NAME [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Aliases: []string{"ns"}, + Short: i18n.T("Create a namespace with the specified name"), + Long: namespaceLong, + Example: namespaceExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete completes all the required options +func (o *NamespaceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = coreclient.NewForConfig(restConfig) + if err != nil { + return err + } + + o.Name = name + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + return err +} + +// Run calls the CreateSubcommandOptions.Run in NamespaceOpts instance +func (o *NamespaceOptions) Run() error { + namespace := o.createNamespace() + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, 
namespace, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + namespace, err = o.Client.Namespaces().Create(context.TODO(), namespace, createOptions) + if err != nil { + return err + } + } + return o.PrintObj(namespace) +} + +// createNamespace outputs a namespace object using the configured fields +func (o *NamespaceOptions) createNamespace() *corev1.Namespace { + namespace := &corev1.Namespace{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "Namespace"}, + ObjectMeta: metav1.ObjectMeta{Name: o.Name}, + } + return namespace +} + +// Validate validates required fields are set to support structured generation +func (o *NamespaceOptions) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_pdb.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_pdb.go new file mode 100644 index 000000000000..0023e748106d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_pdb.go @@ -0,0 +1,261 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + "regexp" + + "github.com/spf13/cobra" + + policyv1 "k8s.io/api/policy/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + policyv1client "k8s.io/client-go/kubernetes/typed/policy/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + pdbLong = templates.LongDesc(i18n.T(` + Create a pod disruption budget with the specified name, selector, and desired minimum available pods.`)) + + pdbExample = templates.Examples(i18n.T(` + # Create a pod disruption budget named my-pdb that will select all pods with the app=rails label + # and require at least one of them being available at any point in time + kubectl create poddisruptionbudget my-pdb --selector=app=rails --min-available=1 + + # Create a pod disruption budget named my-pdb that will select all pods with the app=nginx label + # and require at least half of the pods selected to be available at any point in time + kubectl create pdb my-pdb --selector=app=nginx --min-available=50%`)) +) + +// PodDisruptionBudgetOpts holds the command-line options for poddisruptionbudget sub command +type PodDisruptionBudgetOpts struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + // Name of resource being created + Name string + + MinAvailable string + MaxUnavailable string + + // A label selector to use for this budget + Selector string + CreateAnnotation bool + FieldManager string + Namespace string + EnforceNamespace bool + + Client *policyv1client.PolicyV1Client + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewPodDisruptionBudgetOpts creates a new *PodDisruptionBudgetOpts with sane defaults +func NewPodDisruptionBudgetOpts(ioStreams genericiooptions.IOStreams) *PodDisruptionBudgetOpts { + return &PodDisruptionBudgetOpts{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreatePodDisruptionBudget is a macro command to create a new pod disruption budget. 
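+//
+// Exactly one of --min-available and --max-unavailable must be set, either as
+// an absolute number ("3") or as a percentage ("50%"); anything else, such as
+// "10Gi", is rejected by Validate below. A minimal usage sketch (assuming the
+// caller supplies a Factory f and IOStreams streams):
+//
+//	cmd := NewCmdCreatePodDisruptionBudget(f, streams)
+//	cmd.SetArgs([]string{"my-pdb", "--selector=app=rails", "--min-available=50%"})
+//	_ = cmd.Execute() // errors surface through cmdutil.CheckErr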
+func NewCmdCreatePodDisruptionBudget(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	o := NewPodDisruptionBudgetOpts(ioStreams)
+
+	cmd := &cobra.Command{
+		Use:                   "poddisruptionbudget NAME --selector=SELECTOR --min-available=N [--dry-run=server|client|none]",
+		DisableFlagsInUseLine: true,
+		Aliases:               []string{"pdb"},
+		Short:                 i18n.T("Create a pod disruption budget with the specified name"),
+		Long:                  pdbLong,
+		Example:               pdbExample,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(o.Complete(f, cmd, args))
+			cmdutil.CheckErr(o.Validate())
+			cmdutil.CheckErr(o.Run())
+		},
+	}
+
+	o.PrintFlags.AddFlags(cmd)
+
+	cmdutil.AddApplyAnnotationFlags(cmd)
+	cmdutil.AddValidateFlags(cmd)
+	cmdutil.AddDryRunFlag(cmd)
+
+	cmd.Flags().StringVar(&o.MinAvailable, "min-available", o.MinAvailable, i18n.T("The minimum number or percentage of available pods this budget requires."))
+	cmd.Flags().StringVar(&o.MaxUnavailable, "max-unavailable", o.MaxUnavailable, i18n.T("The maximum number or percentage of unavailable pods this budget requires."))
+	cmd.Flags().StringVar(&o.Selector, "selector", o.Selector, i18n.T("A label selector to use for this budget. Only equality-based selector requirements are supported."))
+	cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
+	return cmd
+}
+
+// Complete completes all the required options
+func (o *PodDisruptionBudgetOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
+	var err error
+	o.Name, err = NameFromCommandArgs(cmd, args)
+	if err != nil {
+		return err
+	}
+
+	restConfig, err := f.ToRESTConfig()
+	if err != nil {
+		return err
+	}
+	o.Client, err = policyv1client.NewForConfig(restConfig)
+	if err != nil {
+		return err
+	}
+
+	o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag)
+
+	o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
+	if err != nil {
+		return err
+	}
+
+	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
+	if err != nil {
+		return err
+	}
+
+	cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy)
+
+	printer, err := o.PrintFlags.ToPrinter()
+	if err != nil {
+		return err
+	}
+
+	o.PrintObj = func(obj runtime.Object) error {
+		return printer.PrintObj(obj, o.Out)
+	}
+
+	o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Validate checks the PodDisruptionBudgetOpts to see if there is sufficient information to run the command
+func (o *PodDisruptionBudgetOpts) Validate() error {
+	if len(o.Name) == 0 {
+		return fmt.Errorf("name must be specified")
+	}
+
+	if len(o.Selector) == 0 {
+		return fmt.Errorf("a selector must be specified")
+	}
+
+	if len(o.MaxUnavailable) == 0 && len(o.MinAvailable) == 0 {
+		return fmt.Errorf("one of min-available or max-unavailable must be specified")
+	}
+
+	if len(o.MaxUnavailable) > 0 && len(o.MinAvailable) > 0 {
+		return fmt.Errorf("min-available and max-unavailable cannot be both specified")
+	}
+
+	// The following regex matches the following values:
+	// 10, 20, 30%, 50% (number and percentage)
+	// but not 10Gb, 20Mb
+	re := regexp.MustCompile(`^[0-9]+%?$`)
+
+	switch {
+	case len(o.MinAvailable) > 0 && !re.MatchString(o.MinAvailable):
+		return fmt.Errorf("invalid format specified for min-available")
+	case len(o.MaxUnavailable) > 0 && !re.MatchString(o.MaxUnavailable):
+		return fmt.Errorf("invalid format specified for max-unavailable")
+	}
+
+	return nil
+}
+
+// Run calls the
CreateSubcommandOptions.Run in PodDisruptionBudgetOpts instance +func (o *PodDisruptionBudgetOpts) Run() error { + podDisruptionBudget, err := o.createPodDisruptionBudgets() + if err != nil { + return err + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, podDisruptionBudget, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + podDisruptionBudget, err = o.Client.PodDisruptionBudgets(o.Namespace).Create(context.TODO(), podDisruptionBudget, createOptions) + if err != nil { + return fmt.Errorf("failed to create poddisruptionbudgets: %v", err) + } + } + return o.PrintObj(podDisruptionBudget) +} + +func (o *PodDisruptionBudgetOpts) createPodDisruptionBudgets() (*policyv1.PodDisruptionBudget, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + podDisruptionBudget := &policyv1.PodDisruptionBudget{ + TypeMeta: metav1.TypeMeta{ + APIVersion: policyv1.SchemeGroupVersion.String(), + Kind: "PodDisruptionBudget", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Namespace: namespace, + }, + } + + selector, err := metav1.ParseToLabelSelector(o.Selector) + if err != nil { + return nil, err + } + + podDisruptionBudget.Spec.Selector = selector + + switch { + case len(o.MinAvailable) > 0: + minAvailable := intstr.Parse(o.MinAvailable) + podDisruptionBudget.Spec.MinAvailable = &minAvailable + case len(o.MaxUnavailable) > 0: + maxUnavailable := intstr.Parse(o.MaxUnavailable) + podDisruptionBudget.Spec.MaxUnavailable = &maxUnavailable + } + + return podDisruptionBudget, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go new file mode 100644 index 000000000000..fe1cd51fccb3 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go @@ -0,0 +1,198 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + schedulingv1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + schedulingv1client "k8s.io/client-go/kubernetes/typed/scheduling/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + pcLong = templates.LongDesc(i18n.T(` + Create a priority class with the specified name, value, globalDefault and description.`)) + + pcExample = templates.Examples(i18n.T(` + # Create a priority class named high-priority + kubectl create priorityclass high-priority --value=1000 --description="high priority" + + # Create a priority class named default-priority that is considered as the global default priority + kubectl create priorityclass default-priority --value=1000 --global-default=true --description="default priority" + + # Create a priority class named high-priority that cannot preempt pods with lower priority + kubectl create priorityclass high-priority --value=1000 --description="high priority" --preemption-policy="Never"`)) +) + +// PriorityClassOptions holds the options for 'create priorityclass' sub command +type PriorityClassOptions struct { + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + Name string + Value int32 + GlobalDefault bool + Description string + PreemptionPolicy string + FieldManager string + CreateAnnotation bool + + Client *schedulingv1client.SchedulingV1Client + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewPriorityClassOptions returns an initialized PriorityClassOptions instance +func NewPriorityClassOptions(ioStreams genericiooptions.IOStreams) *PriorityClassOptions { + return &PriorityClassOptions{ + Value: 0, + PreemptionPolicy: "PreemptLowerPriority", + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreatePriorityClass is a macro command to create a new priorityClass. 
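+//
+// Unlike most create subcommands, Run is wired without a separate Validate
+// step, so flag values are checked only by the API server on create. A
+// minimal usage sketch (assuming the caller supplies a Factory f and
+// IOStreams streams):
+//
+//	cmd := NewCmdCreatePriorityClass(f, streams)
+//	cmd.SetArgs([]string{"high-priority", "--value=1000", "--description=high priority"})
+//	_ = cmd.Execute()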
+func NewCmdCreatePriorityClass(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewPriorityClassOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "priorityclass NAME --value=VALUE --global-default=BOOL [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Aliases: []string{"pc"}, + Short: i18n.T("Create a priority class with the specified name"), + Long: pcLong, + Example: pcExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmd.Flags().Int32Var(&o.Value, "value", o.Value, i18n.T("the value of this priority class.")) + cmd.Flags().BoolVar(&o.GlobalDefault, "global-default", o.GlobalDefault, i18n.T("global-default specifies whether this PriorityClass should be considered as the default priority.")) + cmd.Flags().StringVar(&o.Description, "description", o.Description, i18n.T("description is an arbitrary string that usually provides guidelines on when this priority class should be used.")) + cmd.Flags().StringVar(&o.PreemptionPolicy, "preemption-policy", o.PreemptionPolicy, i18n.T("preemption-policy is the policy for preempting pods with lower priority.")) + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + return cmd +} + +// Complete completes all the required options +func (o *PriorityClassOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = schedulingv1client.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Run calls the CreateSubcommandOptions.Run in the PriorityClassOptions instance +func (o *PriorityClassOptions) Run() error { + priorityClass, err := o.createPriorityClass() + if err != nil { + return err + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, priorityClass, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + priorityClass, err = o.Client.PriorityClasses().Create(context.TODO(), priorityClass, createOptions) + if err != nil { + return fmt.Errorf("failed to create priorityclass: %v", err) + } + } + + return o.PrintObj(priorityClass) +} + +func (o *PriorityClassOptions) createPriorityClass() (*schedulingv1.PriorityClass, error) { + preemptionPolicy := corev1.PreemptionPolicy(o.PreemptionPolicy) + return &schedulingv1.PriorityClass{ + // this is ok because we know 
exactly how we want to be serialized + TypeMeta: metav1.TypeMeta{APIVersion: schedulingv1.SchemeGroupVersion.String(), Kind: "PriorityClass"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + }, + Value: o.Value, + GlobalDefault: o.GlobalDefault, + Description: o.Description, + PreemptionPolicy: &preemptionPolicy, + }, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_quota.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_quota.go new file mode 100644 index 000000000000..d3966274709b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_quota.go @@ -0,0 +1,268 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + resourceapi "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + coreclient "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + quotaLong = templates.LongDesc(i18n.T(` + Create a resource quota with the specified name, hard limits, and optional scopes.`)) + + quotaExample = templates.Examples(i18n.T(` + # Create a new resource quota named my-quota + kubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3,replicationcontrollers=2,resourcequotas=1,secrets=5,persistentvolumeclaims=10 + + # Create a new resource quota named best-effort + kubectl create quota best-effort --hard=pods=100 --scopes=BestEffort`)) +) + +// QuotaOpts holds the command-line options for 'create quota' sub command +type QuotaOpts struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + // The name of a quota object. + Name string + // The hard resource limit string before parsing. + Hard string + // The scopes of a quota object before parsing. 
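+	// Scope names are intentionally not validated client-side; parseScopes
+	// below only rejects empty entries and leaves the valid-scope check to
+	// the API server.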
+	Scopes string
+	CreateAnnotation bool
+	FieldManager     string
+	Namespace        string
+	EnforceNamespace bool
+
+	Client              *coreclient.CoreV1Client
+	DryRunStrategy      cmdutil.DryRunStrategy
+	ValidationDirective string
+
+	genericiooptions.IOStreams
+}
+
+// NewQuotaOpts creates a new *QuotaOpts with sane defaults
+func NewQuotaOpts(ioStreams genericiooptions.IOStreams) *QuotaOpts {
+	return &QuotaOpts{
+		PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
+		IOStreams:  ioStreams,
+	}
+}
+
+// NewCmdCreateQuota is a macro command to create a new quota
+func NewCmdCreateQuota(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	o := NewQuotaOpts(ioStreams)
+
+	cmd := &cobra.Command{
+		Use:                   "quota NAME [--hard=key1=value1,key2=value2] [--scopes=Scope1,Scope2] [--dry-run=server|client|none]",
+		DisableFlagsInUseLine: true,
+		Aliases:               []string{"resourcequota"},
+		Short:                 i18n.T("Create a quota with the specified name"),
+		Long:                  quotaLong,
+		Example:               quotaExample,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(o.Complete(f, cmd, args))
+			cmdutil.CheckErr(o.Validate())
+			cmdutil.CheckErr(o.Run())
+		},
+	}
+
+	o.PrintFlags.AddFlags(cmd)
+
+	cmdutil.AddApplyAnnotationFlags(cmd)
+	cmdutil.AddValidateFlags(cmd)
+	cmdutil.AddDryRunFlag(cmd)
+	cmd.Flags().StringVar(&o.Hard, "hard", o.Hard, i18n.T("A comma-delimited set of resource=quantity pairs that define a hard limit."))
+	cmd.Flags().StringVar(&o.Scopes, "scopes", o.Scopes, i18n.T("A comma-delimited set of quota scopes that must all match each object tracked by the quota."))
+	cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
+	return cmd
+}
+
+// Complete completes all the required options
+func (o *QuotaOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
+	var err error
+	o.Name, err = NameFromCommandArgs(cmd, args)
+	if err != nil {
+		return err
+	}
+
+	restConfig, err := f.ToRESTConfig()
+	if err != nil {
+		return err
+	}
+	o.Client, err = coreclient.NewForConfig(restConfig)
+	if err != nil {
+		return err
+	}
+
+	o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag)
+
+	o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
+	if err != nil {
+		return err
+	}
+
+	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
+	if err != nil {
+		return err
+	}
+
+	cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy)
+
+	printer, err := o.PrintFlags.ToPrinter()
+	if err != nil {
+		return err
+	}
+
+	o.PrintObj = func(obj runtime.Object) error {
+		return printer.PrintObj(obj, o.Out)
+	}
+
+	o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Validate checks the QuotaOpts to see if there is sufficient information to run the command.
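+// (Hard and Scopes are parsed later, in createQuota, so only the quota name
+// is checked at this point.)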
+func (o *QuotaOpts) Validate() error {
+	if len(o.Name) == 0 {
+		return fmt.Errorf("name must be specified")
+	}
+	return nil
+}
+
+// Run does the work
+func (o *QuotaOpts) Run() error {
+	resourceQuota, err := o.createQuota()
+	if err != nil {
+		return err
+	}
+
+	if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, resourceQuota, scheme.DefaultJSONEncoder()); err != nil {
+		return err
+	}
+
+	if o.DryRunStrategy != cmdutil.DryRunClient {
+		createOptions := metav1.CreateOptions{}
+		if o.FieldManager != "" {
+			createOptions.FieldManager = o.FieldManager
+		}
+		createOptions.FieldValidation = o.ValidationDirective
+		if o.DryRunStrategy == cmdutil.DryRunServer {
+			createOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		resourceQuota, err = o.Client.ResourceQuotas(o.Namespace).Create(context.TODO(), resourceQuota, createOptions)
+		if err != nil {
+			return fmt.Errorf("failed to create quota: %v", err)
+		}
+	}
+	return o.PrintObj(resourceQuota)
+}
+
+func (o *QuotaOpts) createQuota() (*corev1.ResourceQuota, error) {
+	namespace := ""
+	if o.EnforceNamespace {
+		namespace = o.Namespace
+	}
+	resourceQuota := &corev1.ResourceQuota{
+		TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ResourceQuota"},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      o.Name,
+			Namespace: namespace,
+		},
+	}
+
+	resourceList, err := populateResourceListV1(o.Hard)
+	if err != nil {
+		return nil, err
+	}
+
+	scopes, err := parseScopes(o.Scopes)
+	if err != nil {
+		return nil, err
+	}
+
+	resourceQuota.Spec.Hard = resourceList
+	resourceQuota.Spec.Scopes = scopes
+
+	return resourceQuota, nil
+}
+
+// populateResourceListV1 takes strings of form <resourceName1>=<value1>,<resourceName2>=<value2>
+// and returns ResourceList.
+func populateResourceListV1(spec string) (corev1.ResourceList, error) {
+	// empty input gets a nil response to preserve generator test expected behaviors
+	if spec == "" {
+		return nil, nil
+	}
+
+	result := corev1.ResourceList{}
+	resourceStatements := strings.Split(spec, ",")
+	for _, resourceStatement := range resourceStatements {
+		parts := strings.Split(resourceStatement, "=")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("Invalid argument syntax %v, expected <resource>=<value>", resourceStatement)
+		}
+		resourceName := corev1.ResourceName(parts[0])
+		resourceQuantity, err := resourceapi.ParseQuantity(parts[1])
+		if err != nil {
+			return nil, err
+		}
+		result[resourceName] = resourceQuantity
+	}
+	return result, nil
+}
+
+func parseScopes(spec string) ([]corev1.ResourceQuotaScope, error) {
+	// empty input gets a nil response to preserve test expected behaviors
+	if spec == "" {
+		return nil, nil
+	}
+
+	scopes := strings.Split(spec, ",")
+	result := make([]corev1.ResourceQuotaScope, 0, len(scopes))
+	for _, scope := range scopes {
+		// intentionally do not verify the scope against the valid scope list. This is done by the apiserver anyway.
+
+		if scope == "" {
+			return nil, fmt.Errorf("invalid resource quota scope \"\"")
+		}
+
+		result = append(result, corev1.ResourceQuotaScope(scope))
+	}
+	return result, nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_role.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_role.go
new file mode 100644
index 000000000000..822ed11e7043
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_role.go
@@ -0,0 +1,445 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + clientgorbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + roleLong = templates.LongDesc(i18n.T(` + Create a role with single rule.`)) + + roleExample = templates.Examples(i18n.T(` + # Create a role named "pod-reader" that allows user to perform "get", "watch" and "list" on pods + kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods + + # Create a role named "pod-reader" with ResourceName specified + kubectl create role pod-reader --verb=get --resource=pods --resource-name=readablepod --resource-name=anotherpod + + # Create a role named "foo" with API Group specified + kubectl create role foo --verb=get,list,watch --resource=rs.apps + + # Create a role named "foo" with SubResource specified + kubectl create role foo --verb=get,list,watch --resource=pods,pods/status`)) + + // Valid resource verb list for validation. + validResourceVerbs = []string{"*", "get", "delete", "list", "create", "update", "patch", "watch", "proxy", "deletecollection", "use", "bind", "escalate", "impersonate"} + + // Specialized verbs and GroupResources + specialVerbs = map[string][]schema.GroupResource{ + "use": { + { + Group: "policy", + Resource: "podsecuritypolicies", + }, + { + Group: "extensions", + Resource: "podsecuritypolicies", + }, + }, + "bind": { + { + Group: "rbac.authorization.k8s.io", + Resource: "roles", + }, + { + Group: "rbac.authorization.k8s.io", + Resource: "clusterroles", + }, + }, + "escalate": { + { + Group: "rbac.authorization.k8s.io", + Resource: "roles", + }, + { + Group: "rbac.authorization.k8s.io", + Resource: "clusterroles", + }, + }, + "impersonate": { + { + Group: "", + Resource: "users", + }, + { + Group: "", + Resource: "serviceaccounts", + }, + { + Group: "", + Resource: "groups", + }, + { + Group: "authentication.k8s.io", + Resource: "userextras", + }, + }, + } +) + +// AddSpecialVerb allows the addition of items to the `specialVerbs` map for non-k8s native resources. 
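+// The registered group/resource pairs are consulted by validateResource when
+// one of the special verbs is requested, e.g. (hypothetical resource):
+//
+//	AddSpecialVerb("use", schema.GroupResource{Group: "example.io", Resource: "widgets"})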
+func AddSpecialVerb(verb string, gr schema.GroupResource) {
+	resources, ok := specialVerbs[verb]
+	if !ok {
+		resources = make([]schema.GroupResource, 1)
+	}
+	resources = append(resources, gr)
+	specialVerbs[verb] = resources
+}
+
+// ResourceOptions holds the related options for '--resource' option
+type ResourceOptions struct {
+	Group       string
+	Resource    string
+	SubResource string
+}
+
+// CreateRoleOptions holds the options for 'create role' sub command
+type CreateRoleOptions struct {
+	PrintFlags *genericclioptions.PrintFlags
+
+	Name          string
+	Verbs         []string
+	Resources     []ResourceOptions
+	ResourceNames []string
+
+	DryRunStrategy      cmdutil.DryRunStrategy
+	ValidationDirective string
+	OutputFormat        string
+	Namespace           string
+	EnforceNamespace    bool
+	Client              clientgorbacv1.RbacV1Interface
+	Mapper              meta.RESTMapper
+	PrintObj            func(obj runtime.Object) error
+	FieldManager        string
+	CreateAnnotation    bool
+
+	genericiooptions.IOStreams
+}
+
+// NewCreateRoleOptions returns an initialized CreateRoleOptions instance
+func NewCreateRoleOptions(ioStreams genericiooptions.IOStreams) *CreateRoleOptions {
+	return &CreateRoleOptions{
+		PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
+
+		IOStreams: ioStreams,
+	}
+}
+
+// NewCmdCreateRole returns an initialized Command instance for 'create role' sub command
+func NewCmdCreateRole(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	o := NewCreateRoleOptions(ioStreams)
+
+	cmd := &cobra.Command{
+		Use:                   "role NAME --verb=verb --resource=resource.group/subresource [--resource-name=resourcename] [--dry-run=server|client|none]",
+		DisableFlagsInUseLine: true,
+		Short:                 i18n.T("Create a role with single rule"),
+		Long:                  roleLong,
+		Example:               roleExample,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(o.Complete(f, cmd, args))
+			cmdutil.CheckErr(o.Validate())
+			cmdutil.CheckErr(o.RunCreateRole())
+		},
+	}
+
+	o.PrintFlags.AddFlags(cmd)
+
+	cmdutil.AddApplyAnnotationFlags(cmd)
+	cmdutil.AddValidateFlags(cmd)
+	cmdutil.AddDryRunFlag(cmd)
+	cmd.Flags().StringSliceVar(&o.Verbs, "verb", o.Verbs, "Verb that applies to the resources contained in the rule")
+	cmd.Flags().StringSlice("resource", []string{}, "Resource that the rule applies to")
+	cmd.Flags().StringArrayVar(&o.ResourceNames, "resource-name", o.ResourceNames, "Resource in the white list that the rule applies to, repeat this flag for multiple items")
+	cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create")
+	return cmd
+}
+
+// Complete completes all the required options
+func (o *CreateRoleOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
+	name, err := NameFromCommandArgs(cmd, args)
+	if err != nil {
+		return err
+	}
+	o.Name = name
+
+	// Remove duplicate verbs.
+	verbs := []string{}
+	for _, v := range o.Verbs {
+		// VerbAll represents all kinds of verbs.
+		if v == "*" {
+			verbs = []string{"*"}
+			break
+		}
+		if !arrayContains(verbs, v) {
+			verbs = append(verbs, v)
+		}
+	}
+	o.Verbs = verbs
+
+	// Support resource.group pattern. If no API Group specified, use "" as core API Group.
+	// e.g. --resource=pods,deployments.extensions
+	resources := cmdutil.GetFlagStringSlice(cmd, "resource")
+	for _, r := range resources {
+		sections := strings.SplitN(r, "/", 2)
+
+		resource := &ResourceOptions{}
+		if len(sections) == 2 {
+			resource.SubResource = sections[1]
+		}
+
+		parts := strings.SplitN(sections[0], ".", 2)
+		if len(parts) == 2 {
+			resource.Group = parts[1]
+		}
+		resource.Resource = parts[0]
+
+		if resource.Resource == "*" && len(parts) == 1 && len(sections) == 1 {
+			o.Resources = []ResourceOptions{*resource}
+			break
+		}
+
+		o.Resources = append(o.Resources, *resource)
+	}
+
+	// Remove duplicate resource names.
+	resourceNames := []string{}
+	for _, n := range o.ResourceNames {
+		if !arrayContains(resourceNames, n) {
+			resourceNames = append(resourceNames, n)
+		}
+	}
+	o.ResourceNames = resourceNames
+
+	// Complete other options for Run.
+	o.Mapper, err = f.ToRESTMapper()
+	if err != nil {
+		return err
+	}
+
+	o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
+	if err != nil {
+		return err
+	}
+	o.OutputFormat = cmdutil.GetFlagString(cmd, "output")
+	o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag)
+
+	cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy)
+	printer, err := o.PrintFlags.ToPrinter()
+	if err != nil {
+		return err
+	}
+	o.PrintObj = func(obj runtime.Object) error {
+		return printer.PrintObj(obj, o.Out)
+	}
+
+	o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd)
+	if err != nil {
+		return err
+	}
+
+	o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
+	if err != nil {
+		return err
+	}
+
+	clientset, err := f.KubernetesClientSet()
+	if err != nil {
+		return err
+	}
+	o.Client = clientset.RbacV1()
+
+	return nil
+}
+
+// Validate makes sure there is no discrepancy in provided option values
+func (o *CreateRoleOptions) Validate() error {
+	if o.Name == "" {
+		return fmt.Errorf("name must be specified")
+	}
+
+	// validate verbs.
+	if len(o.Verbs) == 0 {
+		return fmt.Errorf("at least one verb must be specified")
+	}
+
+	for _, v := range o.Verbs {
+		if !arrayContains(validResourceVerbs, v) {
+			fmt.Fprintf(o.ErrOut, "Warning: '%s' is not a standard resource verb\n", v)
+		}
+	}
+
+	// validate resources.
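+	// A wildcard ("*") resource skips the per-verb check below; otherwise the
+	// special verbs (use, bind, escalate, impersonate) are only accepted for
+	// their whitelisted group/resource pairs in specialVerbs.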
+	if len(o.Resources) == 0 {
+		return fmt.Errorf("at least one resource must be specified")
+	}
+
+	return o.validateResource()
+}
+
+func (o *CreateRoleOptions) validateResource() error {
+	for _, r := range o.Resources {
+		if len(r.Resource) == 0 {
+			return fmt.Errorf("resource must be specified if apiGroup/subresource specified")
+		}
+		if r.Resource == "*" {
+			return nil
+		}
+
+		resource := schema.GroupVersionResource{Resource: r.Resource, Group: r.Group}
+		groupVersionResource, err := o.Mapper.ResourceFor(schema.GroupVersionResource{Resource: r.Resource, Group: r.Group})
+		if err == nil {
+			resource = groupVersionResource
+		}
+
+		for _, v := range o.Verbs {
+			if groupResources, ok := specialVerbs[v]; ok {
+				match := false
+				for _, extra := range groupResources {
+					if resource.Resource == extra.Resource && resource.Group == extra.Group {
+						match = true
+						err = nil
+						break
+					}
+				}
+				if !match {
+					return fmt.Errorf("can not perform '%s' on '%s' in group '%s'", v, resource.Resource, resource.Group)
+				}
+			}
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// RunCreateRole performs the execution of 'create role' sub command
+func (o *CreateRoleOptions) RunCreateRole() error {
+	role := &rbacv1.Role{
+		// this is ok because we know exactly how we want to be serialized
+		TypeMeta: metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "Role"},
+	}
+	role.Name = o.Name
+	rules, err := generateResourcePolicyRules(o.Mapper, o.Verbs, o.Resources, o.ResourceNames, []string{})
+	if err != nil {
+		return err
+	}
+	role.Rules = rules
+	if o.EnforceNamespace {
+		role.Namespace = o.Namespace
+	}
+
+	if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, role, scheme.DefaultJSONEncoder()); err != nil {
+		return err
+	}
+
+	// Create role.
+	if o.DryRunStrategy != cmdutil.DryRunClient {
+		createOptions := metav1.CreateOptions{}
+		if o.FieldManager != "" {
+			createOptions.FieldManager = o.FieldManager
+		}
+		createOptions.FieldValidation = o.ValidationDirective
+		if o.DryRunStrategy == cmdutil.DryRunServer {
+			createOptions.DryRun = []string{metav1.DryRunAll}
+		}
+		role, err = o.Client.Roles(o.Namespace).Create(context.TODO(), role, createOptions)
+		if err != nil {
+			return err
+		}
+	}
+
+	return o.PrintObj(role)
+}
+
+func arrayContains(s []string, e string) bool {
+	for _, a := range s {
+		if a == e {
+			return true
+		}
+	}
+	return false
+}
+
+func generateResourcePolicyRules(mapper meta.RESTMapper, verbs []string, resources []ResourceOptions, resourceNames []string, nonResourceURLs []string) ([]rbacv1.PolicyRule, error) {
+	// groupResourceMapping is an apigroup-resource map. The key of this map is api group, while the value
+	// is a string array of resources under this api group.
+	// E.g. groupResourceMapping = {"extensions": ["replicasets", "deployments"], "batch":["jobs"]}
+	groupResourceMapping := map[string][]string{}
+
+	// This loop does the following work:
+	// 1. Constructs groupResourceMapping based on input resources.
+	// 2. Prevents pointing to non-existent resources.
+	// 3. Transfers resource short name to long name. E.g.
rs.extensions is transferred to replicasets.extensions + for _, r := range resources { + resource := schema.GroupVersionResource{Resource: r.Resource, Group: r.Group} + groupVersionResource, err := mapper.ResourceFor(schema.GroupVersionResource{Resource: r.Resource, Group: r.Group}) + if err == nil { + resource = groupVersionResource + } + + if len(r.SubResource) > 0 { + resource.Resource = resource.Resource + "/" + r.SubResource + } + if !arrayContains(groupResourceMapping[resource.Group], resource.Resource) { + groupResourceMapping[resource.Group] = append(groupResourceMapping[resource.Group], resource.Resource) + } + } + + // Create separate rule for each of the api group. + rules := []rbacv1.PolicyRule{} + for _, g := range sets.StringKeySet(groupResourceMapping).List() { + rule := rbacv1.PolicyRule{} + rule.Verbs = verbs + rule.Resources = groupResourceMapping[g] + rule.APIGroups = []string{g} + rule.ResourceNames = resourceNames + rules = append(rules, rule) + } + + if len(nonResourceURLs) > 0 { + rule := rbacv1.PolicyRule{} + rule.Verbs = verbs + rule.NonResourceURLs = nonResourceURLs + rules = append(rules, rule) + } + + return rules, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go new file mode 100644 index 000000000000..237a54422bfa --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go @@ -0,0 +1,251 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package create
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	rbacv1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
+	"k8s.io/kubectl/pkg/scheme"
+	"k8s.io/kubectl/pkg/util"
+	"k8s.io/kubectl/pkg/util/i18n"
+	"k8s.io/kubectl/pkg/util/templates"
+)
+
+var (
+	roleBindingLong = templates.LongDesc(i18n.T(`
+	Create a role binding for a particular role or cluster role.`))
+
+	roleBindingExample = templates.Examples(i18n.T(`
+	# Create a role binding for user1, user2, and group1 using the admin cluster role
+	kubectl create rolebinding admin --clusterrole=admin --user=user1 --user=user2 --group=group1
+
+	# Create a role binding for serviceaccount monitoring:sa-dev using the admin role
+	kubectl create rolebinding admin-binding --role=admin --serviceaccount=monitoring:sa-dev`))
+)
+
+// RoleBindingOptions holds the options for 'create rolebinding' sub command
+type RoleBindingOptions struct {
+	PrintFlags *genericclioptions.PrintFlags
+	PrintObj   func(obj runtime.Object) error
+
+	Name             string
+	Namespace        string
+	EnforceNamespace bool
+	ClusterRole      string
+	Role             string
+	Users            []string
+	Groups           []string
+	ServiceAccounts  []string
+	FieldManager     string
+	CreateAnnotation bool
+
+	Client              rbacclientv1.RbacV1Interface
+	DryRunStrategy      cmdutil.DryRunStrategy
+	ValidationDirective string
+
+	genericiooptions.IOStreams
+}
+
+// NewRoleBindingOptions creates a new *RoleBindingOptions with sane defaults
+func NewRoleBindingOptions(ioStreams genericiooptions.IOStreams) *RoleBindingOptions {
+	return &RoleBindingOptions{
+		Users:           []string{},
+		Groups:          []string{},
+		ServiceAccounts: []string{},
+		PrintFlags:      genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme),
+		IOStreams:       ioStreams,
+	}
+}
+
+// NewCmdCreateRoleBinding returns an initialized Command instance for 'create rolebinding' sub command
+func NewCmdCreateRoleBinding(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	o := NewRoleBindingOptions(ioStreams)
+
+	cmd := &cobra.Command{
+		Use:                   "rolebinding NAME --clusterrole=NAME|--role=NAME [--user=username] [--group=groupname] [--serviceaccount=namespace:serviceaccountname] [--dry-run=server|client|none]",
+		DisableFlagsInUseLine: true,
+		Short:                 i18n.T("Create a role binding for a particular role or cluster role"),
+		Long:                  roleBindingLong,
+		Example:               roleBindingExample,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(o.Complete(f, cmd, args))
+			cmdutil.CheckErr(o.Validate())
+			cmdutil.CheckErr(o.Run())
+		},
+	}
+
+	o.PrintFlags.AddFlags(cmd)
+
+	cmdutil.AddApplyAnnotationFlags(cmd)
+	cmdutil.AddValidateFlags(cmd)
+	cmdutil.AddDryRunFlag(cmd)
+	cmd.Flags().StringVar(&o.ClusterRole, "clusterrole", "", i18n.T("ClusterRole this RoleBinding should reference"))
+	cmd.Flags().StringVar(&o.Role, "role", "", i18n.T("Role this RoleBinding should reference"))
+	cmd.Flags().StringArrayVar(&o.Users, "user", o.Users, "Usernames to bind to the role. The flag can be repeated to add multiple users.")
+	cmd.Flags().StringArrayVar(&o.Groups, "group", o.Groups, "Groups to bind to the role. The flag can be repeated to add multiple groups.")
+	cmd.Flags().StringArrayVar(&o.ServiceAccounts, "serviceaccount", o.ServiceAccounts, "Service accounts to bind to the role, in the format <namespace>:<serviceaccountname>.
The flag can be repeated to add multiple service accounts.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + return cmd +} + +// Complete completes all the required options +func (o *RoleBindingOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + clientConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = rbacclientv1.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + return err +} + +// Validate validates required fields are set +func (o *RoleBindingOptions) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if (len(o.ClusterRole) == 0) == (len(o.Role) == 0) { + return fmt.Errorf("exactly one of clusterrole or role must be specified") + } + return nil +} + +// Run performs the execution of 'create rolebinding' sub command +func (o *RoleBindingOptions) Run() error { + roleBinding, err := o.createRoleBinding() + if err != nil { + return err + } + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, roleBinding, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + roleBinding, err = o.Client.RoleBindings(o.Namespace).Create(context.TODO(), roleBinding, createOptions) + if err != nil { + return fmt.Errorf("failed to create rolebinding: %v", err) + } + } + return o.PrintObj(roleBinding) +} + +func (o *RoleBindingOptions) createRoleBinding() (*rbacv1.RoleBinding, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + roleBinding := &rbacv1.RoleBinding{ + TypeMeta: metav1.TypeMeta{APIVersion: rbacv1.SchemeGroupVersion.String(), Kind: "RoleBinding"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Namespace: namespace, + }, + } + + switch { + case len(o.Role) > 0: + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "Role", + Name: o.Role, + } + case len(o.ClusterRole) > 0: + roleBinding.RoleRef = rbacv1.RoleRef{ + APIGroup: rbacv1.GroupName, + Kind: "ClusterRole", + Name: o.ClusterRole, + } + } + + for _, user := range o.Users { + roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.UserKind, + APIGroup: rbacv1.GroupName, + Name: user, + }) + } + + for _, group := range o.Groups { + roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{ + Kind: rbacv1.GroupKind, + APIGroup: rbacv1.GroupName, + Name: group, + }) + } + + for _, sa := range o.ServiceAccounts { + tokens := strings.Split(sa, ":") + if len(tokens) != 2 || tokens[0] == "" 
|| tokens[1] == "" {
+			return nil, fmt.Errorf("serviceaccount must be <namespace>:<name>")
+		}
+		roleBinding.Subjects = append(roleBinding.Subjects, rbacv1.Subject{
+			Kind:      rbacv1.ServiceAccountKind,
+			APIGroup:  "",
+			Namespace: tokens[0],
+			Name:      tokens[1],
+		})
+	}
+	return roleBinding, nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret.go
new file mode 100644
index 000000000000..b9bb191f9f5c
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret.go
@@ -0,0 +1,421 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package create
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path"
+	"strings"
+
+	"github.com/spf13/cobra"
+
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
+	"k8s.io/kubectl/pkg/scheme"
+	"k8s.io/kubectl/pkg/util"
+	"k8s.io/kubectl/pkg/util/hash"
+	"k8s.io/kubectl/pkg/util/i18n"
+	"k8s.io/kubectl/pkg/util/templates"
+)
+
+// NewCmdCreateSecret groups subcommands to create various types of secrets.
+// This is the entry point of create_secret.go which will be called by create.go
+func NewCmdCreateSecret(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:                   "secret (docker-registry | generic | tls)",
+		DisableFlagsInUseLine: true,
+		Short:                 i18n.T("Create a secret using a specified subcommand"),
+		Long:                  secretLong,
+		Run:                   cmdutil.DefaultSubCommandRun(ioStreams.ErrOut),
+	}
+	cmd.AddCommand(NewCmdCreateSecretDockerRegistry(f, ioStreams))
+	cmd.AddCommand(NewCmdCreateSecretTLS(f, ioStreams))
+	cmd.AddCommand(NewCmdCreateSecretGeneric(f, ioStreams))
+
+	return cmd
+}
+
+var (
+	secretLong = templates.LongDesc(i18n.T(`
+	Create a secret with specified type.
+
+	A docker-registry type secret is for accessing a container registry.
+
+	A generic type secret indicates an Opaque secret type.
+
+	A tls type secret holds a TLS certificate and its associated key.`))
+
+	secretForGenericLong = templates.LongDesc(i18n.T(`
+	Create a secret based on a file, directory, or specified literal value.
+
+	A single secret may package one or more key/value pairs.
+
+	When creating a secret based on a file, the key will default to the basename of the file, and the value will
+	default to the file content. If the basename is an invalid key or you wish to choose your own, you may specify
+	an alternate key.
+
+	When creating a secret based on a directory, each file whose basename is a valid key in the directory will be
+	packaged into the secret. Any directory entries except regular files are ignored (e.g.
subdirectories, + symlinks, devices, pipes, etc).`)) + + secretForGenericExample = templates.Examples(i18n.T(` + # Create a new secret named my-secret with keys for each file in folder bar + kubectl create secret generic my-secret --from-file=path/to/bar + + # Create a new secret named my-secret with specified keys instead of names on disk + kubectl create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub + + # Create a new secret named my-secret with key1=supersecret and key2=topsecret + kubectl create secret generic my-secret --from-literal=key1=supersecret --from-literal=key2=topsecret + + # Create a new secret named my-secret using a combination of a file and a literal + kubectl create secret generic my-secret --from-file=ssh-privatekey=path/to/id_rsa --from-literal=passphrase=topsecret + + # Create a new secret named my-secret from env files + kubectl create secret generic my-secret --from-env-file=path/to/foo.env --from-env-file=path/to/bar.env`)) +) + +// CreateSecretOptions holds the options for 'create secret' sub command +type CreateSecretOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + // Name of secret (required) + Name string + // Type of secret (optional) + Type string + // FileSources to derive the secret from (optional) + FileSources []string + // LiteralSources to derive the secret from (optional) + LiteralSources []string + // EnvFileSources to derive the secret from (optional) + EnvFileSources []string + // AppendHash; if true, derive a hash from the Secret data and type and append it to the name + AppendHash bool + + FieldManager string + CreateAnnotation bool + Namespace string + EnforceNamespace bool + + Client corev1client.CoreV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewSecretOptions creates a new *CreateSecretOptions with default value +func NewSecretOptions(ioStreams genericiooptions.IOStreams) *CreateSecretOptions { + return &CreateSecretOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateSecretGeneric is a command to create generic secrets from files, directories, or literal values +func NewCmdCreateSecretGeneric(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewSecretOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "generic NAME [--type=string] [--from-file=[key=]source] [--from-literal=key1=value1] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a secret from a local file, directory, or literal value"), + Long: secretForGenericLong, + Example: secretForGenericExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + + cmd.Flags().StringSliceVar(&o.FileSources, "from-file", o.FileSources, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. 
Specifying a directory will iterate each named file in the directory that is a valid secret key.") + cmd.Flags().StringArrayVar(&o.LiteralSources, "from-literal", o.LiteralSources, "Specify a key and literal value to insert in secret (i.e. mykey=somevalue)") + cmd.Flags().StringSliceVar(&o.EnvFileSources, "from-env-file", o.EnvFileSources, "Specify the path to a file to read lines of key=val pairs to create a secret.") + cmd.Flags().StringVar(&o.Type, "type", o.Type, i18n.T("The type of secret to create")) + cmd.Flags().BoolVar(&o.AppendHash, "append-hash", o.AppendHash, "Append a hash of the secret to its name.") + + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete loads data from the command line environment +func (o *CreateSecretOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + + o.Client, err = corev1client.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate checks if CreateSecretOptions has sufficient value to run +func (o *CreateSecretOptions) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if len(o.EnvFileSources) > 0 && (len(o.FileSources) > 0 || len(o.LiteralSources) > 0) { + return fmt.Errorf("from-env-file cannot be combined with from-file or from-literal") + } + return nil +} + +// Run calls createSecret which will create secret based on CreateSecretOptions +// and makes an API call to the server +func (o *CreateSecretOptions) Run() error { + secret, err := o.createSecret() + if err != nil { + return err + } + err = util.CreateOrUpdateAnnotation(o.CreateAnnotation, secret, scheme.DefaultJSONEncoder()) + if err != nil { + return err + } + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + secret, err = o.Client.Secrets(o.Namespace).Create(context.TODO(), secret, createOptions) + if err != nil { + return fmt.Errorf("failed to create secret %v", err) + } + } + + return o.PrintObj(secret) +} + +// createSecret fills in key value pair from the information given in +// CreateSecretOptions into *corev1.Secret +func (o *CreateSecretOptions) createSecret() (*corev1.Secret, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + secret := newSecretObj(o.Name, namespace, corev1.SecretType(o.Type)) + if len(o.LiteralSources) > 0 { + if err := handleSecretFromLiteralSources(secret, o.LiteralSources); err != nil { + return 
nil, err + } + } + if len(o.FileSources) > 0 { + if err := handleSecretFromFileSources(secret, o.FileSources); err != nil { + return nil, err + } + } + if len(o.EnvFileSources) > 0 { + if err := handleSecretFromEnvFileSources(secret, o.EnvFileSources); err != nil { + return nil, err + } + } + if o.AppendHash { + hash, err := hash.SecretHash(secret) + if err != nil { + return nil, err + } + secret.Name = fmt.Sprintf("%s-%s", secret.Name, hash) + } + + return secret, nil +} + +// newSecretObj will create a new Secret Object given name, namespace and secretType +func newSecretObj(name, namespace string, secretType corev1.SecretType) *corev1.Secret { + return &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Type: secretType, + Data: map[string][]byte{}, + } +} + +// handleSecretFromLiteralSources adds the specified literal source +// information into the provided secret +func handleSecretFromLiteralSources(secret *corev1.Secret, literalSources []string) error { + for _, literalSource := range literalSources { + keyName, value, err := util.ParseLiteralSource(literalSource) + if err != nil { + return err + } + if err = addKeyFromLiteralToSecret(secret, keyName, []byte(value)); err != nil { + return err + } + } + + return nil +} + +// handleSecretFromFileSources adds the specified file source information into the provided secret +func handleSecretFromFileSources(secret *corev1.Secret, fileSources []string) error { + for _, fileSource := range fileSources { + keyName, filePath, err := util.ParseFileSource(fileSource) + if err != nil { + return err + } + fileInfo, err := os.Stat(filePath) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", filePath, err.Err) + default: + return fmt.Errorf("error reading %s: %v", filePath, err) + } + } + // if the filePath is a directory + if fileInfo.IsDir() { + if strings.Contains(fileSource, "=") { + return fmt.Errorf("cannot give a key name for a directory path") + } + fileList, err := os.ReadDir(filePath) + if err != nil { + return fmt.Errorf("error listing files in %s: %v", filePath, err) + } + for _, item := range fileList { + itemPath := path.Join(filePath, item.Name()) + if item.Type().IsRegular() { + keyName = item.Name() + if err := addKeyFromFileToSecret(secret, keyName, itemPath); err != nil { + return err + } + } + } + // if the filepath is a file + } else { + if err := addKeyFromFileToSecret(secret, keyName, filePath); err != nil { + return err + } + } + + } + + return nil +} + +// handleSecretFromEnvFileSources adds the specified env files source information +// into the provided secret +func handleSecretFromEnvFileSources(secret *corev1.Secret, envFileSources []string) error { + for _, envFileSource := range envFileSources { + info, err := os.Stat(envFileSource) + if err != nil { + switch err := err.(type) { + case *os.PathError: + return fmt.Errorf("error reading %s: %v", envFileSource, err.Err) + default: + return fmt.Errorf("error reading %s: %v", envFileSource, err) + } + } + if info.IsDir() { + return fmt.Errorf("env secret file cannot be a directory") + } + err = cmdutil.AddFromEnvFile(envFileSource, func(key, value string) error { + return addKeyFromLiteralToSecret(secret, key, []byte(value)) + }) + if err != nil { + return err + } + } + + return nil +} + +// addKeyFromFileToSecret adds a key with the given name to a Secret, populating 
+// the value with the content of the given file path, or returns an error. +func addKeyFromFileToSecret(secret *corev1.Secret, keyName, filePath string) error { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + return addKeyFromLiteralToSecret(secret, keyName, data) +} + +// addKeyFromLiteralToSecret adds the given key and data to the given secret, +// returning an error if the key is not valid or if the key already exists. +func addKeyFromLiteralToSecret(secret *corev1.Secret, keyName string, data []byte) error { + if errs := validation.IsConfigMapKey(keyName); len(errs) != 0 { + return fmt.Errorf("%q is not a valid key name for a Secret: %s", keyName, strings.Join(errs, ";")) + } + if _, entryExists := secret.Data[keyName]; entryExists { + return fmt.Errorf("cannot add key %s, another key by that name already exists", keyName) + } + secret.Data[keyName] = data + + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go new file mode 100644 index 000000000000..d8acc90d4a99 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go @@ -0,0 +1,298 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/hash" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + secretForDockerRegistryLong = templates.LongDesc(i18n.T(` + Create a new secret for use with Docker registries. + + Dockercfg secrets are used to authenticate against Docker registries. + + When using the Docker command line to push images, you can authenticate to a given registry by running: + '$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --password=DOCKER_PASSWORD --email=DOCKER_EMAIL'. + + That produces a ~/.dockercfg file that is used by subsequent 'docker push' and 'docker pull' commands to + authenticate to the registry. The email address is optional. + + When creating applications, you may have a Docker registry that requires authentication. In order for the + nodes to pull images on your behalf, they must have the credentials. 
You can provide this information + by creating a dockercfg secret and attaching it to your service account.`)) + + secretForDockerRegistryExample = templates.Examples(i18n.T(` + # If you do not already have a .dockercfg file, create a dockercfg secret directly + kubectl create secret docker-registry my-secret --docker-server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL + + # Create a new secret named my-secret from ~/.docker/config.json + kubectl create secret docker-registry my-secret --from-file=.dockerconfigjson=path/to/.docker/config.json`)) +) + +// DockerConfigJSON represents a local docker auth config file +// for pulling images. +type DockerConfigJSON struct { + Auths DockerConfig `json:"auths" datapolicy:"token"` + // +optional + HttpHeaders map[string]string `json:"HttpHeaders,omitempty" datapolicy:"token"` +} + +// DockerConfig represents the config file used by the docker CLI. +// This config represents the credentials that should be used +// when pulling images from specific image repositories. +type DockerConfig map[string]DockerConfigEntry + +// DockerConfigEntry holds the user information that grants access to a docker registry +type DockerConfigEntry struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty" datapolicy:"password"` + Email string `json:"email,omitempty"` + Auth string `json:"auth,omitempty" datapolicy:"token"` +} + +// CreateSecretDockerRegistryOptions holds the options for 'create secret docker-registry' sub command +type CreateSecretDockerRegistryOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + // Name of secret (required) + Name string + // FileSources to derive the secret from (optional) + FileSources []string + // Username for registry (required) + Username string + // Email for registry (optional) + Email string + // Password for registry (required) + Password string `datapolicy:"password"` + // Server for registry (required) + Server string + // AppendHash; if true, derive a hash from the Secret and append it to the name + AppendHash bool + + FieldManager string + CreateAnnotation bool + Namespace string + EnforceNamespace bool + + Client corev1client.CoreV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewSecretDockerRegistryOptions creates a new *CreateSecretDockerRegistryOptions with default value +func NewSecretDockerRegistryOptions(ioStreams genericiooptions.IOStreams) *CreateSecretDockerRegistryOptions { + return &CreateSecretDockerRegistryOptions{ + Server: "https://index.docker.io/v1/", + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateSecretDockerRegistry is a macro command for creating secrets to work with Docker registries +func NewCmdCreateSecretDockerRegistry(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewSecretDockerRegistryOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "docker-registry NAME --docker-username=user --docker-password=password --docker-email=email [--docker-server=string] [--from-file=[key=]source] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a secret for use with a Docker registry"), + Long: secretForDockerRegistryLong, + Example: 
secretForDockerRegistryExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + + cmd.Flags().StringVar(&o.Username, "docker-username", o.Username, i18n.T("Username for Docker registry authentication")) + cmd.Flags().StringVar(&o.Password, "docker-password", o.Password, i18n.T("Password for Docker registry authentication")) + cmd.Flags().StringVar(&o.Email, "docker-email", o.Email, i18n.T("Email for Docker registry")) + cmd.Flags().StringVar(&o.Server, "docker-server", o.Server, i18n.T("Server location for Docker registry")) + cmd.Flags().BoolVar(&o.AppendHash, "append-hash", o.AppendHash, "Append a hash of the secret to its name.") + cmd.Flags().StringSliceVar(&o.FileSources, "from-file", o.FileSources, "Key files can be specified using their file path, in which case a default name will be given to them, or optionally with a name and file path, in which case the given name will be used. Specifying a directory will iterate each named file in the directory that is a valid secret key.") + + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete loads data from the command line environment +func (o *CreateSecretDockerRegistryOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + + o.Client, err = corev1client.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate checks if CreateSecretDockerRegistryOptions has sufficient value to run +func (o *CreateSecretDockerRegistryOptions) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + if len(o.FileSources) == 0 && (len(o.Username) == 0 || len(o.Password) == 0 || len(o.Server) == 0) { + return fmt.Errorf("either --from-file or the combination of --docker-username, --docker-password and --docker-server is required") + } + return nil +} + +// Run calls createSecretDockerRegistry which will create secretDockerRegistry based on CreateSecretDockerRegistryOptions +// and makes an API call to the server +func (o *CreateSecretDockerRegistryOptions) Run() error { + secretDockerRegistry, err := o.createSecretDockerRegistry() + if err != nil { + return err + } + err = util.CreateOrUpdateAnnotation(o.CreateAnnotation, secretDockerRegistry, scheme.DefaultJSONEncoder()) + if err != nil { + return err + } + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } 
+ createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + secretDockerRegistry, err = o.Client.Secrets(o.Namespace).Create(context.TODO(), secretDockerRegistry, createOptions) + if err != nil { + return fmt.Errorf("failed to create secret %v", err) + } + } + + return o.PrintObj(secretDockerRegistry) +} + +// createSecretDockerRegistry fills in key value pair from the information given in +// CreateSecretDockerRegistryOptions into *corev1.Secret +func (o *CreateSecretDockerRegistryOptions) createSecretDockerRegistry() (*corev1.Secret, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + secretDockerRegistry := newSecretObj(o.Name, namespace, corev1.SecretTypeDockerConfigJson) + if len(o.FileSources) > 0 { + if err := handleSecretFromFileSources(secretDockerRegistry, o.FileSources); err != nil { + return nil, err + } + } else { + dockerConfigJSONContent, err := handleDockerCfgJSONContent(o.Username, o.Password, o.Email, o.Server) + if err != nil { + return nil, err + } + secretDockerRegistry.Data[corev1.DockerConfigJsonKey] = dockerConfigJSONContent + } + if o.AppendHash { + hash, err := hash.SecretHash(secretDockerRegistry) + if err != nil { + return nil, err + } + secretDockerRegistry.Name = fmt.Sprintf("%s-%s", secretDockerRegistry.Name, hash) + } + return secretDockerRegistry, nil +} + +// handleDockerCfgJSONContent serializes a ~/.docker/config.json file +func handleDockerCfgJSONContent(username, password, email, server string) ([]byte, error) { + dockerConfigAuth := DockerConfigEntry{ + Username: username, + Password: password, + Email: email, + Auth: encodeDockerConfigFieldAuth(username, password), + } + dockerConfigJSON := DockerConfigJSON{ + Auths: map[string]DockerConfigEntry{server: dockerConfigAuth}, + } + + return json.Marshal(dockerConfigJSON) +} + +// encodeDockerConfigFieldAuth returns base64 encoding of the username and password string +func encodeDockerConfigFieldAuth(username, password string) string { + fieldValue := username + ":" + password + return base64.StdEncoding.EncodeToString([]byte(fieldValue)) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go new file mode 100644 index 000000000000..a23e4ca62e15 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go @@ -0,0 +1,249 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
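As a side note on the two helpers just above: handleDockerCfgJSONContent serializes a DockerConfigJSON value whose "auth" field is encodeDockerConfigFieldAuth's base64 of "username:password", and the result is stored under the Secret's .dockerconfigjson key. A minimal, self-contained sketch of that payload follows; the registry name and credentials are made-up illustrations, not values from this patch:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors encodeDockerConfigFieldAuth: auth = base64(username + ":" + password).
	auth := base64.StdEncoding.EncodeToString([]byte("user:pass"))
	// Mirrors the DockerConfigJSON shape that handleDockerCfgJSONContent marshals.
	cfg := map[string]any{
		"auths": map[string]any{
			"registry.example.com": map[string]string{
				"username": "user",
				"password": "pass",
				"auth":     auth,
			},
		},
	}
	b, _ := json.Marshal(cfg)
	fmt.Println(string(b))
	// {"auths":{"registry.example.com":{"auth":"dXNlcjpwYXNz","password":"pass","username":"user"}}}
}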
+*/ + +package create + +import ( + "context" + "crypto/tls" + "fmt" + "os" + + "github.com/spf13/cobra" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/hash" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + secretForTLSLong = templates.LongDesc(i18n.T(` + Create a TLS secret from the given public/private key pair. + + The public/private key pair must exist beforehand. The public key certificate must be .PEM encoded and match + the given private key.`)) + + secretForTLSExample = templates.Examples(i18n.T(` + # Create a new TLS secret named tls-secret with the given key pair + kubectl create secret tls tls-secret --cert=path/to/tls.crt --key=path/to/tls.key`)) +) + +// CreateSecretTLSOptions holds the options for 'create secret tls' sub command +type CreateSecretTLSOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + // Name is the name of this TLS secret. + Name string + // Key is the path to the user's private key. + Key string + // Cert is the path to the user's public key certificate. + Cert string + // AppendHash; if true, derive a hash from the Secret and append it to the name + AppendHash bool + + FieldManager string + CreateAnnotation bool + Namespace string + EnforceNamespace bool + + Client corev1client.CoreV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + + genericiooptions.IOStreams +} + +// NewSecretTLSOptions creates a new *CreateSecretTLSOptions with default value +func NewSecretTLSOptions(ioStreams genericiooptions.IOStreams) *CreateSecretTLSOptions { + return &CreateSecretTLSOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateSecretTLS is a macro command for creating secrets to work with TLS client or server +func NewCmdCreateSecretTLS(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewSecretTLSOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "tls NAME --cert=path/to/cert/file --key=path/to/key/file [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a TLS secret"), + Long: secretForTLSLong, + Example: secretForTLSExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + + cmd.Flags().StringVar(&o.Cert, "cert", o.Cert, i18n.T("Path to PEM encoded public key certificate.")) + cmd.Flags().StringVar(&o.Key, "key", o.Key, i18n.T("Path to private key associated with given certificate.")) + cmd.Flags().BoolVar(&o.AppendHash, "append-hash", o.AppendHash, "Append a hash of the secret to its name.") + + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + + return cmd +} + +// Complete loads data from the command line environment +func (o *CreateSecretTLSOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var 
err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + + o.Client, err = corev1client.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate checks if CreateSecretTLSOptions has sufficient value to run +func (o *CreateSecretTLSOptions) Validate() error { + // TODO: This is not strictly necessary. We can generate a self-signed cert + // if no key/cert is given. The only requirement is that we either get both + // or none. See test/e2e/ingress_utils for self-signed cert generation. + if len(o.Key) == 0 || len(o.Cert) == 0 { + return fmt.Errorf("key and cert must be specified") + } + return nil +} + +// Run calls createSecretTLS which will create secretTLS based on CreateSecretTLSOptions +// and makes an API call to the server +func (o *CreateSecretTLSOptions) Run() error { + secretTLS, err := o.createSecretTLS() + if err != nil { + return err + } + err = util.CreateOrUpdateAnnotation(o.CreateAnnotation, secretTLS, scheme.DefaultJSONEncoder()) + if err != nil { + return err + } + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + secretTLS, err = o.Client.Secrets(o.Namespace).Create(context.TODO(), secretTLS, createOptions) + if err != nil { + return fmt.Errorf("failed to create secret %v", err) + } + } + return o.PrintObj(secretTLS) +} + +// createSecretTLS fills in key value pair from the information given in +// CreateSecretTLSOptions into *corev1.Secret +func (o *CreateSecretTLSOptions) createSecretTLS() (*corev1.Secret, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + tlsCert, err := readFile(o.Cert) + if err != nil { + return nil, err + } + tlsKey, err := readFile(o.Key) + if err != nil { + return nil, err + } + if _, err := tls.X509KeyPair(tlsCert, tlsKey); err != nil { + return nil, err + } + // TODO: Add more validation. + // 1. If the certificate contains intermediates, it is a valid chain. + // 2. Format etc. + + secretTLS := newSecretObj(o.Name, namespace, corev1.SecretTypeTLS) + secretTLS.Data[corev1.TLSCertKey] = []byte(tlsCert) + secretTLS.Data[corev1.TLSPrivateKeyKey] = []byte(tlsKey) + if o.AppendHash { + hash, err := hash.SecretHash(secretTLS) + if err != nil { + return nil, err + } + secretTLS.Name = fmt.Sprintf("%s-%s", secretTLS.Name, hash) + } + + return secretTLS, nil +} + +// readFile just reads a file into a byte array. 
+func readFile(file string) ([]byte, error) { + b, err := os.ReadFile(file) + if err != nil { + return []byte{}, fmt.Errorf("Cannot read file %v, %v", file, err) + } + return b, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_service.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_service.go new file mode 100644 index 000000000000..54b164b351b8 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_service.go @@ -0,0 +1,412 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" + utilsnet "k8s.io/utils/net" +) + +// NewCmdCreateService is a macro command to create a new service +func NewCmdCreateService(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + cmd := &cobra.Command{ + Use: "service", + Aliases: []string{"svc"}, + Short: i18n.T("Create a service using a specified subcommand"), + Long: i18n.T("Create a service using a specified subcommand."), + Run: cmdutil.DefaultSubCommandRun(ioStreams.ErrOut), + } + cmd.AddCommand(NewCmdCreateServiceClusterIP(f, ioStreams)) + cmd.AddCommand(NewCmdCreateServiceNodePort(f, ioStreams)) + cmd.AddCommand(NewCmdCreateServiceLoadBalancer(f, ioStreams)) + cmd.AddCommand(NewCmdCreateServiceExternalName(f, ioStreams)) + + return cmd +} + +// ServiceOptions holds the options for 'create service' sub command +type ServiceOptions struct { + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + Name string + TCP []string + Type corev1.ServiceType + ClusterIP string + NodePort int + ExternalName string + + FieldManager string + CreateAnnotation bool + Namespace string + EnforceNamespace bool + + Client corev1client.CoreV1Interface + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + genericiooptions.IOStreams +} + +// NewServiceOptions creates a ServiceOptions struct +func NewServiceOptions(ioStreams genericiooptions.IOStreams, serviceType corev1.ServiceType) *ServiceOptions { + return &ServiceOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + Type: serviceType, + } +} + +// Complete completes all the required options +func (o *ServiceOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + name, err := NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + o.Name = name + + clientConfig, err := f.ToRESTConfig() + if err != 
nil { + return err + } + o.Client, err = corev1client.NewForConfig(clientConfig) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.DryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate if the options are valid +func (o *ServiceOptions) Validate() error { + if o.ClusterIP == corev1.ClusterIPNone && o.Type != corev1.ServiceTypeClusterIP { + return fmt.Errorf("ClusterIP=None can only be used with ClusterIP service type") + } + if o.ClusterIP != corev1.ClusterIPNone && len(o.TCP) == 0 && o.Type != corev1.ServiceTypeExternalName { + return fmt.Errorf("at least one tcp port specifier must be provided") + } + if o.Type == corev1.ServiceTypeExternalName { + if errs := validation.IsDNS1123Subdomain(o.ExternalName); len(errs) != 0 { + return fmt.Errorf("invalid service external name %s", o.ExternalName) + } + } + return nil +} + +func (o *ServiceOptions) createService() (*corev1.Service, error) { + ports := []corev1.ServicePort{} + for _, tcpString := range o.TCP { + port, targetPort, err := parsePorts(tcpString) + if err != nil { + return nil, err + } + + portName := strings.Replace(tcpString, ":", "-", -1) + ports = append(ports, corev1.ServicePort{ + Name: portName, + Port: port, + TargetPort: targetPort, + Protocol: corev1.Protocol("TCP"), + NodePort: int32(o.NodePort), + }) + } + + // setup default label and selector + labels := map[string]string{} + labels["app"] = o.Name + selector := map[string]string{} + selector["app"] = o.Name + + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + + service := corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Labels: labels, + Namespace: namespace, + }, + Spec: corev1.ServiceSpec{ + Type: o.Type, + Selector: selector, + Ports: ports, + ExternalName: o.ExternalName, + }, + } + if len(o.ClusterIP) > 0 { + service.Spec.ClusterIP = o.ClusterIP + } + return &service, nil +} + +// Run the service command +func (o *ServiceOptions) Run() error { + service, err := o.createService() + if err != nil { + return err + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, service, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + var err error + service, err = o.Client.Services(o.Namespace).Create(context.TODO(), service, createOptions) + if err != nil { + return fmt.Errorf("failed to create %s service: %v", o.Type, err) + } + } + return o.PrintObj(service) +} + +var ( + serviceClusterIPLong = templates.LongDesc(i18n.T(` + Create a ClusterIP service with the specified name.`)) + + serviceClusterIPExample = templates.Examples(i18n.T(` + # Create a new ClusterIP service named my-cs + kubectl create service clusterip my-cs --tcp=5678:8080 + + # Create a new ClusterIP service 
named my-cs (in headless mode) + kubectl create service clusterip my-cs --clusterip="None"`)) +) + +// NewCmdCreateServiceClusterIP is a command to create a ClusterIP service +func NewCmdCreateServiceClusterIP(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewServiceOptions(ioStreams, corev1.ServiceTypeClusterIP) + + cmd := &cobra.Command{ + Use: "clusterip NAME [--tcp=<port>:<targetPort>] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a ClusterIP service"), + Long: serviceClusterIPLong, + Example: serviceClusterIPExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().StringSliceVar(&o.TCP, "tcp", o.TCP, "Port pairs can be specified as '<port>:<targetPort>'.") + cmd.Flags().StringVar(&o.ClusterIP, "clusterip", o.ClusterIP, i18n.T("Assign your own ClusterIP or set to 'None' for a 'headless' service (no loadbalancing).")) + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + cmdutil.AddDryRunFlag(cmd) + + return cmd +} + +var ( + serviceNodePortLong = templates.LongDesc(i18n.T(` + Create a NodePort service with the specified name.`)) + + serviceNodePortExample = templates.Examples(i18n.T(` + # Create a new NodePort service named my-ns + kubectl create service nodeport my-ns --tcp=5678:8080`)) +) + +// NewCmdCreateServiceNodePort is a macro command for creating a NodePort service +func NewCmdCreateServiceNodePort(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewServiceOptions(ioStreams, corev1.ServiceTypeNodePort) + + cmd := &cobra.Command{ + Use: "nodeport NAME [--tcp=port:targetPort] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a NodePort service"), + Long: serviceNodePortLong, + Example: serviceNodePortExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().IntVar(&o.NodePort, "node-port", o.NodePort, "Port used to expose the service on each node in a cluster.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + cmd.Flags().StringSliceVar(&o.TCP, "tcp", o.TCP, "Port pairs can be specified as '<port>:<targetPort>'.") + cmdutil.AddDryRunFlag(cmd) + return cmd +} + +var ( + serviceLoadBalancerLong = templates.LongDesc(i18n.T(` + Create a LoadBalancer service with the specified name.`)) + + serviceLoadBalancerExample = templates.Examples(i18n.T(` + # Create a new LoadBalancer service named my-lbs + kubectl create service loadbalancer my-lbs --tcp=5678:8080`)) +) + +// NewCmdCreateServiceLoadBalancer is a macro command for creating a LoadBalancer service +func NewCmdCreateServiceLoadBalancer(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewServiceOptions(ioStreams, corev1.ServiceTypeLoadBalancer) + + cmd := &cobra.Command{ + Use: "loadbalancer NAME [--tcp=port:targetPort] [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create a LoadBalancer service"), + Long: serviceLoadBalancerLong, + Example: serviceLoadBalancerExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + 
cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().StringSliceVar(&o.TCP, "tcp", o.TCP, "Port pairs can be specified as '<port>:<targetPort>'.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + cmdutil.AddDryRunFlag(cmd) + return cmd +} + +var ( + serviceExternalNameLong = templates.LongDesc(i18n.T(` + Create an ExternalName service with the specified name. + + An ExternalName service references an external DNS address instead of + only pods, which allows application authors to reference services + that exist off platform, on other clusters, or locally.`)) + + serviceExternalNameExample = templates.Examples(i18n.T(` + # Create a new ExternalName service named my-ns + kubectl create service externalname my-ns --external-name bar.com`)) +) + +// NewCmdCreateServiceExternalName is a macro command for creating an ExternalName service +func NewCmdCreateServiceExternalName(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewServiceOptions(ioStreams, corev1.ServiceTypeExternalName) + + cmd := &cobra.Command{ + Use: "externalname NAME --external-name external.name [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Short: i18n.T("Create an ExternalName service"), + Long: serviceExternalNameLong, + Example: serviceExternalNameExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().StringSliceVar(&o.TCP, "tcp", o.TCP, "Port pairs can be specified as '<port>:<targetPort>'.") + cmd.Flags().StringVar(&o.ExternalName, "external-name", o.ExternalName, i18n.T("External name of service")) + cmd.MarkFlagRequired("external-name") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + cmdutil.AddDryRunFlag(cmd) + return cmd +} + +func parsePorts(portString string) (int32, intstr.IntOrString, error) { + portStringSlice := strings.Split(portString, ":") + + port, err := utilsnet.ParsePort(portStringSlice[0], true) + if err != nil { + return 0, intstr.FromInt32(0), err + } + + if len(portStringSlice) == 1 { + port32 := int32(port) + return port32, intstr.FromInt32(port32), nil + } + + var targetPort intstr.IntOrString + if portNum, err := strconv.Atoi(portStringSlice[1]); err != nil { + if errs := validation.IsValidPortName(portStringSlice[1]); len(errs) != 0 { + return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ",")) + } + targetPort = intstr.FromString(portStringSlice[1]) + } else { + if errs := validation.IsValidPortNum(portNum); len(errs) != 0 { + return 0, intstr.FromInt32(0), fmt.Errorf(strings.Join(errs, ",")) + } + targetPort = intstr.FromInt32(int32(portNum)) + } + return int32(port), targetPort, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go new file mode 100644 index 000000000000..da2e6a332d73 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go @@ -0,0 +1,202 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
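For reference, the '<port>:<targetPort>' syntax handled by parsePorts above can be exercised in isolation. A minimal sketch, assuming well-formed input; miniParse is a hypothetical stand-in that drops the k8s.io validation and intstr helpers and is not part of the vendored code:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// miniParse mirrors the split logic of parsePorts: "5678:8080" -> (5678, "8080"),
// "5678:http" -> (5678, "http"), and a bare "5678" targets itself.
func miniParse(s string) (int, string, error) {
	parts := strings.SplitN(s, ":", 2)
	port, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, "", err
	}
	if len(parts) == 1 {
		// No target given: the target port defaults to the port itself.
		return port, parts[0], nil
	}
	// The target may be numeric or a named port.
	return port, parts[1], nil
}

func main() {
	fmt.Println(miniParse("5678:8080"))
	fmt.Println(miniParse("5678:http"))
	fmt.Println(miniParse("5678"))
}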
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package create + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + coreclient "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + serviceAccountLong = templates.LongDesc(i18n.T(` + Create a service account with the specified name.`)) + + serviceAccountExample = templates.Examples(i18n.T(` + # Create a new service account named my-service-account + kubectl create serviceaccount my-service-account`)) +) + +// ServiceAccountOpts holds the options for 'create serviceaccount' sub command +type ServiceAccountOpts struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + // Name of resource being created + Name string + DryRunStrategy cmdutil.DryRunStrategy + ValidationDirective string + CreateAnnotation bool + FieldManager string + + Namespace string + EnforceNamespace bool + + Mapper meta.RESTMapper + Client *coreclient.CoreV1Client + + genericiooptions.IOStreams +} + +// NewServiceAccountOpts creates a new *ServiceAccountOpts with sane defaults +func NewServiceAccountOpts(ioStreams genericiooptions.IOStreams) *ServiceAccountOpts { + return &ServiceAccountOpts{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateServiceAccount is a macro command to create a new service account +func NewCmdCreateServiceAccount(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewServiceAccountOpts(ioStreams) + + cmd := &cobra.Command{ + Use: "serviceaccount NAME [--dry-run=server|client|none]", + DisableFlagsInUseLine: true, + Aliases: []string{"sa"}, + Short: i18n.T("Create a service account with the specified name"), + Long: serviceAccountLong, + Example: serviceAccountExample, + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmdutil.AddApplyAnnotationFlags(cmd) + cmdutil.AddValidateFlags(cmd) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-create") + return cmd +} + +// Complete completes all the required options +func (o *ServiceAccountOpts) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + restConfig, err := f.ToRESTConfig() + if err != nil { + return err + } + o.Client, err = coreclient.NewForConfig(restConfig) + if err != nil { + return err + } + + o.CreateAnnotation = cmdutil.GetFlagBool(cmd, cmdutil.ApplyAnnotationsFlag) + + o.DryRunStrategy, err = 
cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + o.Namespace, o.EnforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.DryRunStrategy) + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + o.ValidationDirective, err = cmdutil.GetValidationDirective(cmd) + if err != nil { + return err + } + + return nil +} + +// Validate checks ServiceAccountOpts to see if there is sufficient information run the command. +func (o *ServiceAccountOpts) Validate() error { + if len(o.Name) == 0 { + return fmt.Errorf("name must be specified") + } + return nil +} + +// Run makes the api call to the server +func (o *ServiceAccountOpts) Run() error { + serviceAccount, err := o.createServiceAccount() + if err != nil { + return err + } + + if err := util.CreateOrUpdateAnnotation(o.CreateAnnotation, serviceAccount, scheme.DefaultJSONEncoder()); err != nil { + return err + } + + if o.DryRunStrategy != cmdutil.DryRunClient { + createOptions := metav1.CreateOptions{} + if o.FieldManager != "" { + createOptions.FieldManager = o.FieldManager + } + createOptions.FieldValidation = o.ValidationDirective + if o.DryRunStrategy == cmdutil.DryRunServer { + createOptions.DryRun = []string{metav1.DryRunAll} + } + serviceAccount, err = o.Client.ServiceAccounts(o.Namespace).Create(context.TODO(), serviceAccount, createOptions) + if err != nil { + return fmt.Errorf("failed to create serviceaccount: %v", err) + } + } + return o.PrintObj(serviceAccount) +} + +func (o *ServiceAccountOpts) createServiceAccount() (*corev1.ServiceAccount, error) { + namespace := "" + if o.EnforceNamespace { + namespace = o.Namespace + } + serviceAccount := &corev1.ServiceAccount{ + TypeMeta: metav1.TypeMeta{APIVersion: corev1.SchemeGroupVersion.String(), Kind: "ServiceAccount"}, + ObjectMeta: metav1.ObjectMeta{ + Name: o.Name, + Namespace: namespace, + }, + } + serviceAccount.Name = o.Name + return serviceAccount, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/create/create_token.go b/vendor/k8s.io/kubectl/pkg/cmd/create/create_token.go new file mode 100644 index 000000000000..adfd5964dc9b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/create/create_token.go @@ -0,0 +1,282 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package create + +import ( + "context" + "fmt" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + authenticationv1 "k8s.io/api/authentication/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/templates" + "k8s.io/kubectl/pkg/util/term" + "k8s.io/utils/pointer" +) + +// TokenOptions is the data required to perform a token request operation. +type TokenOptions struct { + // PrintFlags holds options necessary for obtaining a printer + PrintFlags *genericclioptions.PrintFlags + PrintObj func(obj runtime.Object) error + + // Flags hold the parsed CLI flags. + Flags *pflag.FlagSet + + // Name and namespace of service account to create a token for + Name string + Namespace string + + // BoundObjectKind is the kind of object to bind the token to. Optional. Can be Pod or Secret. + BoundObjectKind string + // BoundObjectName is the name of the object to bind the token to. Required if BoundObjectKind is set. + BoundObjectName string + // BoundObjectUID is the uid of the object to bind the token to. If unset, defaults to the current uid of the bound object. + BoundObjectUID string + + // Audiences indicate the valid audiences for the requested token. If unset, defaults to the Kubernetes API server audiences. + Audiences []string + + // Duration is the requested token lifetime. Optional. + Duration time.Duration + + // CoreClient is the API client used to request the token. Required. + CoreClient corev1client.CoreV1Interface + + // IOStreams are the output streams for the operation. Required. 
+ genericiooptions.IOStreams +} + +var ( + tokenLong = templates.LongDesc(`Request a service account token.`) + + tokenExample = templates.Examples(` + # Request a token to authenticate to the kube-apiserver as the service account "myapp" in the current namespace + kubectl create token myapp + + # Request a token for a service account in a custom namespace + kubectl create token myapp --namespace myns + + # Request a token with a custom expiration + kubectl create token myapp --duration 10m + + # Request a token with a custom audience + kubectl create token myapp --audience https://example.com + + # Request a token bound to an instance of a Secret object + kubectl create token myapp --bound-object-kind Secret --bound-object-name mysecret + + # Request a token bound to an instance of a Secret object with a specific UID + kubectl create token myapp --bound-object-kind Secret --bound-object-name mysecret --bound-object-uid 0d4691ed-659b-4935-a832-355f77ee47cc +`) +) + +func boundObjectKindToAPIVersions() map[string]string { + kinds := map[string]string{ + "Pod": "v1", + "Secret": "v1", + } + if os.Getenv("KUBECTL_NODE_BOUND_TOKENS") == "true" { + kinds["Node"] = "v1" + } + return kinds +} + +func NewTokenOpts(ioStreams genericiooptions.IOStreams) *TokenOptions { + return &TokenOptions{ + PrintFlags: genericclioptions.NewPrintFlags("created").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +// NewCmdCreateToken returns an initialized Command for 'create token' sub command +func NewCmdCreateToken(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewTokenOpts(ioStreams) + + cmd := &cobra.Command{ + Use: "token SERVICE_ACCOUNT_NAME", + DisableFlagsInUseLine: true, + Short: "Request a service account token", + Long: tokenLong, + Example: tokenExample, + ValidArgsFunction: completion.ResourceNameCompletionFunc(f, "serviceaccount"), + Run: func(cmd *cobra.Command, args []string) { + if err := o.Complete(f, cmd, args); err != nil { + cmdutil.CheckErr(err) + return + } + if err := o.Validate(); err != nil { + cmdutil.CheckErr(err) + return + } + if err := o.Run(); err != nil { + cmdutil.CheckErr(err) + return + } + }, + } + + o.PrintFlags.AddFlags(cmd) + + cmd.Flags().StringArrayVar(&o.Audiences, "audience", o.Audiences, "Audience of the requested token. If unset, defaults to requesting a token for use with the Kubernetes API server. May be repeated to request a token valid for multiple audiences.") + + cmd.Flags().DurationVar(&o.Duration, "duration", o.Duration, "Requested lifetime of the issued token. If not set or if set to 0, the lifetime will be determined by the server automatically. The server may return a token with a longer or shorter lifetime.") + + cmd.Flags().StringVar(&o.BoundObjectKind, "bound-object-kind", o.BoundObjectKind, "Kind of an object to bind the token to. "+ + "Supported kinds are "+strings.Join(sets.StringKeySet(boundObjectKindToAPIVersions()).List(), ", ")+". "+ + "If set, --bound-object-name must be provided.") + cmd.Flags().StringVar(&o.BoundObjectName, "bound-object-name", o.BoundObjectName, "Name of an object to bind the token to. "+ + "The token will expire when the object is deleted. "+ + "Requires --bound-object-kind.") + cmd.Flags().StringVar(&o.BoundObjectUID, "bound-object-uid", o.BoundObjectUID, "UID of an object to bind the token to. "+ + "Requires --bound-object-kind and --bound-object-name. 
"+ + "If unset, the UID of the existing object is used.") + + o.Flags = cmd.Flags() + + return cmd +} + +// Complete completes all the required options +func (o *TokenOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + + o.Name, err = NameFromCommandArgs(cmd, args) + if err != nil { + return err + } + + o.Namespace, _, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + client, err := f.KubernetesClientSet() + if err != nil { + return err + } + o.CoreClient = client.CoreV1() + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return err + } + + o.PrintObj = func(obj runtime.Object) error { + return printer.PrintObj(obj, o.Out) + } + + return nil +} + +// Validate makes sure provided values for TokenOptions are valid +func (o *TokenOptions) Validate() error { + if o.CoreClient == nil { + return fmt.Errorf("no client provided") + } + if len(o.Name) == 0 { + return fmt.Errorf("service account name is required") + } + if len(o.Namespace) == 0 { + return fmt.Errorf("--namespace is required") + } + if o.Duration < 0 { + return fmt.Errorf("--duration must be greater than or equal to 0") + } + if o.Duration%time.Second != 0 { + return fmt.Errorf("--duration cannot be expressed in units less than seconds") + } + for _, aud := range o.Audiences { + if len(aud) == 0 { + return fmt.Errorf("--audience must not be an empty string") + } + } + + if len(o.BoundObjectKind) == 0 { + if len(o.BoundObjectName) > 0 { + return fmt.Errorf("--bound-object-name can only be set if --bound-object-kind is provided") + } + if len(o.BoundObjectUID) > 0 { + return fmt.Errorf("--bound-object-uid can only be set if --bound-object-kind is provided") + } + } else { + if _, ok := boundObjectKindToAPIVersions()[o.BoundObjectKind]; !ok { + return fmt.Errorf("supported --bound-object-kind values are %s", strings.Join(sets.StringKeySet(boundObjectKindToAPIVersions()).List(), ", ")) + } + if len(o.BoundObjectName) == 0 { + return fmt.Errorf("--bound-object-name is required if --bound-object-kind is provided") + } + } + + return nil +} + +// Run requests a token +func (o *TokenOptions) Run() error { + request := &authenticationv1.TokenRequest{ + Spec: authenticationv1.TokenRequestSpec{ + Audiences: o.Audiences, + }, + } + if o.Duration > 0 { + request.Spec.ExpirationSeconds = pointer.Int64(int64(o.Duration / time.Second)) + } + if len(o.BoundObjectKind) > 0 { + request.Spec.BoundObjectRef = &authenticationv1.BoundObjectReference{ + Kind: o.BoundObjectKind, + APIVersion: boundObjectKindToAPIVersions()[o.BoundObjectKind], + Name: o.BoundObjectName, + UID: types.UID(o.BoundObjectUID), + } + } + + response, err := o.CoreClient.ServiceAccounts(o.Namespace).CreateToken(context.TODO(), o.Name, request, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("failed to create token: %v", err) + } + if len(response.Status.Token) == 0 { + return fmt.Errorf("failed to create token: no token in server response") + } + + if o.PrintFlags.OutputFlagSpecified() { + return o.PrintObj(response) + } + + if term.IsTerminal(o.Out) { + // include a newline when printing interactively + fmt.Fprintf(o.Out, "%s\n", response.Status.Token) + } else { + // otherwise just print the token + fmt.Fprintf(o.Out, "%s", response.Status.Token) + } + + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/edit/edit.go b/vendor/k8s.io/kubectl/pkg/cmd/edit/edit.go new file mode 100644 index 000000000000..2e9228324a48 --- /dev/null +++ 
b/vendor/k8s.io/kubectl/pkg/cmd/edit/edit.go @@ -0,0 +1,108 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package edit + +import ( + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericiooptions" + + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/cmd/util/editor" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +var ( + editLong = templates.LongDesc(i18n.T(` + Edit a resource from the default editor. + + The edit command allows you to directly edit any API resource you can retrieve via the + command-line tools. It will open the editor defined by your KUBE_EDITOR, or EDITOR + environment variables, or fall back to 'vi' for Linux or 'notepad' for Windows. + When attempting to open the editor, it will first attempt to use the shell + that has been defined in the 'SHELL' environment variable. If this is not defined, + the default shell will be used, which is '/bin/bash' for Linux or 'cmd' for Windows. + + You can edit multiple objects, although changes are applied one at a time. The command + accepts file names as well as command-line arguments, although the files you point to must + be previously saved versions of resources. + + Editing is done with the API version used to fetch the resource. + To edit using a specific API version, fully-qualify the resource, version, and group. + + The default format is YAML. To edit in JSON, specify "-o json". + + The flag --windows-line-endings can be used to force Windows line endings, + otherwise the default for your operating system will be used. + + In the event an error occurs while updating, a temporary file will be created on disk + that contains your unapplied changes. The most common error when updating a resource + is another editor changing the resource on the server. 
When this occurs, you will have + to apply your changes to the newer version of the resource, or update your temporary + saved copy to include the latest resource version.`)) + + editExample = templates.Examples(i18n.T(` + # Edit the service named 'registry' + kubectl edit svc/registry + + # Use an alternative editor + KUBE_EDITOR="nano" kubectl edit svc/registry + + # Edit the job 'myjob' in JSON using the v1 API format + kubectl edit job.v1.batch/myjob -o json + + # Edit the deployment 'mydeployment' in YAML and save the modified config in its annotation + kubectl edit deployment/mydeployment -o yaml --save-config + + # Edit the 'status' subresource for the 'mydeployment' deployment + kubectl edit deployment mydeployment --subresource='status'`)) +) + +// NewCmdEdit creates the `edit` command +func NewCmdEdit(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := editor.NewEditOptions(editor.NormalEditMode, ioStreams) + cmd := &cobra.Command{ + Use: "edit (RESOURCE/NAME | -f FILENAME)", + DisableFlagsInUseLine: true, + Short: i18n.T("Edit a resource on the server"), + Long: editLong, + Example: editExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, args, cmd)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run()) + }, + } + + // bind flag structs + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + usage := "to use to edit the resource" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddValidateFlags(cmd) + cmd.Flags().BoolVarP(&o.OutputPatch, "output-patch", "", o.OutputPatch, "Output the patch if the resource is edited.") + cmd.Flags().BoolVar(&o.WindowsLineEndings, "windows-line-endings", o.WindowsLineEndings, + "Defaults to the line ending native to your platform.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.FieldManager, "kubectl-edit") + cmdutil.AddApplyAnnotationVarFlags(cmd, &o.ApplyAnnotation) + cmdutil.AddSubresourceFlags(cmd, &o.Subresource, "If specified, edit will operate on the subresource of the requested object.", editor.SupportedSubresources...) + return cmd +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/explain/explain.go b/vendor/k8s.io/kubectl/pkg/cmd/explain/explain.go new file mode 100644 index 000000000000..1cde900601b2 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/explain/explain.go @@ -0,0 +1,234 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package explain
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/cli-runtime/pkg/genericiooptions"
+	openapiclient "k8s.io/client-go/openapi"
+	cmdutil "k8s.io/kubectl/pkg/cmd/util"
+	"k8s.io/kubectl/pkg/explain"
+	openapiv3explain "k8s.io/kubectl/pkg/explain/v2"
+	"k8s.io/kubectl/pkg/util/i18n"
+	"k8s.io/kubectl/pkg/util/openapi"
+	"k8s.io/kubectl/pkg/util/templates"
+)
+
+var (
+	explainLong = templates.LongDesc(i18n.T(`
+		Describe fields and structure of various resources.
+
+		This command describes the fields associated with each supported API resource.
+		Fields are identified via a simple JSONPath identifier:
+
+			<type>.<fieldName>[.<fieldName>]
+
+		Information about each field is retrieved from the server in OpenAPI format.`))
+
+	explainExamples = templates.Examples(i18n.T(`
+		# Get the documentation of the resource and its fields
+		kubectl explain pods
+
+		# Get all the fields in the resource
+		kubectl explain pods --recursive
+
+		# Get the explanation for deployment in supported api versions
+		kubectl explain deployments --api-version=apps/v1
+
+		# Get the documentation of a specific field of a resource
+		kubectl explain pods.spec.containers
+
+		# Get the documentation of resources in different format
+		kubectl explain deployment --output=plaintext-openapiv2`))
+
+	plaintextTemplateName          = "plaintext"
+	plaintextOpenAPIV2TemplateName = "plaintext-openapiv2"
+)
+
+type ExplainOptions struct {
+	genericiooptions.IOStreams
+
+	CmdParent  string
+	APIVersion string
+	Recursive  bool
+
+	args []string
+
+	Mapper        meta.RESTMapper
+	openAPIGetter openapi.OpenAPIResourcesGetter
+
+	// Name of the template to use with the openapiv3 template renderer.
+	OutputFormat string
+
+	// Client capable of fetching openapi documents from the user's cluster
+	OpenAPIV3Client openapiclient.Client
+}
+
+func NewExplainOptions(parent string, streams genericiooptions.IOStreams) *ExplainOptions {
+	return &ExplainOptions{
+		IOStreams:    streams,
+		CmdParent:    parent,
+		OutputFormat: plaintextTemplateName,
+	}
+}
+
+// NewCmdExplain returns a cobra command for swagger docs
+func NewCmdExplain(parent string, f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command {
+	o := NewExplainOptions(parent, streams)
+
+	cmd := &cobra.Command{
+		Use:                   "explain TYPE [--recursive=FALSE|TRUE] [--api-version=api-version-group] [--output=plaintext|plaintext-openapiv2]",
+		DisableFlagsInUseLine: true,
+		Short:                 i18n.T("Get documentation for a resource"),
+		Long:                  explainLong + "\n\n" + cmdutil.SuggestAPIResources(parent),
+		Example:               explainExamples,
+		Run: func(cmd *cobra.Command, args []string) {
+			cmdutil.CheckErr(o.Complete(f, cmd, args))
+			cmdutil.CheckErr(o.Validate())
+			cmdutil.CheckErr(o.Run())
+		},
+	}
+	cmd.Flags().BoolVar(&o.Recursive, "recursive", o.Recursive, "When true, print the name of all the fields recursively. Otherwise, print the available fields with their description.")
+	cmd.Flags().StringVar(&o.APIVersion, "api-version", o.APIVersion, "Use given api-version (group/version) of the resource.")
+
+	// Only enable --output as a valid flag if the feature is enabled
+	cmd.Flags().StringVar(&o.OutputFormat, "output", plaintextTemplateName, "Format in which to render the schema. Valid values are: (plaintext, plaintext-openapiv2).")
+
+	return cmd
+}
+
+func (o *ExplainOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
+	var err error
+	o.Mapper, err = f.ToRESTMapper()
+	if err != nil {
+		return err
+	}
+
+	// Only openapi v3 needs the discovery client.
+	o.OpenAPIV3Client, err = f.OpenAPIV3Client()
+	if err != nil {
+		return err
+	}
+
+	// Lazy-load the OpenAPI V2 Resources, so they're not loaded when using OpenAPI V3.
+	o.openAPIGetter = f
+	o.args = args
+	return nil
+}
+
+func (o *ExplainOptions) Validate() error {
+	if len(o.args) == 0 {
+		return fmt.Errorf("You must specify the type of resource to explain. %s\n", cmdutil.SuggestAPIResources(o.CmdParent))
+	}
+	if len(o.args) > 1 {
+		return fmt.Errorf("We accept only this format: explain RESOURCE\n")
+	}
+
+	return nil
+}
+
+// Run executes the appropriate steps to print a model's documentation
+func (o *ExplainOptions) Run() error {
+	var fullySpecifiedGVR schema.GroupVersionResource
+	var fieldsPath []string
+	var err error
+	if len(o.APIVersion) == 0 {
+		fullySpecifiedGVR, fieldsPath, err = explain.SplitAndParseResourceRequestWithMatchingPrefix(o.args[0], o.Mapper)
+		if err != nil {
+			return err
+		}
+	} else {
+		// TODO: After we figured out the new syntax to separate group and resource, allow
+		// the users to use it in explain (kubectl explain <group><syntax><resource>).
+		// Refer to issue #16039 for why we do this. Refer to PR #15808 that used "/" syntax.
+		fullySpecifiedGVR, fieldsPath, err = explain.SplitAndParseResourceRequest(o.args[0], o.Mapper)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Fallback to openapiv2 implementation using special template name
+	switch o.OutputFormat {
+	case plaintextOpenAPIV2TemplateName:
+		return o.renderOpenAPIV2(fullySpecifiedGVR, fieldsPath)
+	case plaintextTemplateName:
+		// Check whether the server responds to OpenAPIV3.
+		if _, err := o.OpenAPIV3Client.Paths(); err != nil {
+			// Use v2 renderer if server does not support v3
+			return o.renderOpenAPIV2(fullySpecifiedGVR, fieldsPath)
+		}
+
+		fallthrough
+	default:
+		if len(o.APIVersion) > 0 {
+			apiVersion, err := schema.ParseGroupVersion(o.APIVersion)
+			if err != nil {
+				return err
+			}
+			fullySpecifiedGVR.Group = apiVersion.Group
+			fullySpecifiedGVR.Version = apiVersion.Version
+		}
+
+		return openapiv3explain.PrintModelDescription(
+			fieldsPath,
+			o.Out,
+			o.OpenAPIV3Client,
+			fullySpecifiedGVR,
+			o.Recursive,
+			o.OutputFormat,
+		)
+	}
+}
+
+func (o *ExplainOptions) renderOpenAPIV2(
+	fullySpecifiedGVR schema.GroupVersionResource,
+	fieldsPath []string,
+) error {
+	var err error
+
+	gvk, _ := o.Mapper.KindFor(fullySpecifiedGVR)
+	if gvk.Empty() {
+		gvk, err = o.Mapper.KindFor(fullySpecifiedGVR.GroupResource().WithVersion(""))
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(o.APIVersion) != 0 {
+		apiVersion, err := schema.ParseGroupVersion(o.APIVersion)
+		if err != nil {
+			return err
+		}
+		gvk = apiVersion.WithKind(gvk.Kind)
+	}
+
+	resources, err := o.openAPIGetter.OpenAPISchema()
+	if err != nil {
+		return err
+	}
+	schema := resources.LookupResource(gvk)
+	if schema == nil {
+		return fmt.Errorf("couldn't find resource for %q", gvk)
+	}
+
+	return explain.PrintModelDescription(fieldsPath, o.Out, schema, gvk, o.Recursive)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/label/label.go b/vendor/k8s.io/kubectl/pkg/cmd/label/label.go
new file mode 100644
index 000000000000..39ef03f0f416
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/label/label.go
@@ -0,0 +1,466 @@
+/*
+Copyright 2014 The Kubernetes Authors.
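A note on the renderer selection in Run above: the only probe is OpenAPIV3Client.Paths(). A standalone sketch of that decision (hypothetical helper; openapiclient.Client is the same interface held by ExplainOptions):

    package explainutil

    import (
    	openapiclient "k8s.io/client-go/openapi"
    )

    // pickFormat mirrors the fallback in (*ExplainOptions).Run: prefer the
    // OpenAPI v3 renderer, and silently drop to the v2 renderer when the
    // server exposes no v3 discovery endpoint.
    func pickFormat(client openapiclient.Client) string {
    	if _, err := client.Paths(); err != nil {
    		return "plaintext-openapiv2" // legacy renderer
    	}
    	return "plaintext"
    }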
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package label + +import ( + "fmt" + "reflect" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructuredscheme" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/validation" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/tools/clientcmd" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/templates" +) + +const ( + MsgNotLabeled = "not labeled" + MsgLabeled = "labeled" + MsgUnLabeled = "unlabeled" +) + +// LabelOptions have the data required to perform the label operation +type LabelOptions struct { + // Filename options + resource.FilenameOptions + RecordFlags *genericclioptions.RecordFlags + + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + + // Common user flags + overwrite bool + list bool + local bool + dryRunStrategy cmdutil.DryRunStrategy + all bool + allNamespaces bool + resourceVersion string + selector string + fieldSelector string + outputFormat string + fieldManager string + + // results of arg parsing + resources []string + newLabels map[string]string + removeLabels []string + + Recorder genericclioptions.Recorder + + namespace string + enforceNamespace bool + builder *resource.Builder + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + + // Common shared fields + genericiooptions.IOStreams +} + +var ( + labelLong = templates.LongDesc(i18n.T(` + Update the labels on a resource. + + * A label key and value must begin with a letter or number, and may contain letters, numbers, hyphens, dots, and underscores, up to %[1]d characters each. + * Optionally, the key can begin with a DNS subdomain prefix and a single '/', like example.com/my-app. + * If --overwrite is true, then existing labels can be overwritten, otherwise attempting to overwrite a label will result in an error. 
+ * If --resource-version is specified, then updates will use this resource version, otherwise the existing resource-version will be used.`)) + + labelExample = templates.Examples(i18n.T(` + # Update pod 'foo' with the label 'unhealthy' and the value 'true' + kubectl label pods foo unhealthy=true + + # Update pod 'foo' with the label 'status' and the value 'unhealthy', overwriting any existing value + kubectl label --overwrite pods foo status=unhealthy + + # Update all pods in the namespace + kubectl label pods --all status=unhealthy + + # Update a pod identified by the type and name in "pod.json" + kubectl label -f pod.json status=unhealthy + + # Update pod 'foo' only if the resource is unchanged from version 1 + kubectl label pods foo status=unhealthy --resource-version=1 + + # Update pod 'foo' by removing a label named 'bar' if it exists + # Does not require the --overwrite flag + kubectl label pods foo bar-`)) +) + +func NewLabelOptions(ioStreams genericiooptions.IOStreams) *LabelOptions { + return &LabelOptions{ + RecordFlags: genericclioptions.NewRecordFlags(), + Recorder: genericclioptions.NoopRecorder{}, + + PrintFlags: genericclioptions.NewPrintFlags("labeled").WithTypeSetter(scheme.Scheme), + + IOStreams: ioStreams, + } +} + +func NewCmdLabel(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewLabelOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "label [--overwrite] (-f FILENAME | TYPE NAME) KEY_1=VAL_1 ... KEY_N=VAL_N [--resource-version=version]", + DisableFlagsInUseLine: true, + Short: i18n.T("Update the labels on a resource"), + Long: fmt.Sprintf(labelLong, validation.LabelValueMaxLength), + Example: labelExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunLabel()) + }, + } + + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + cmd.Flags().BoolVar(&o.overwrite, "overwrite", o.overwrite, "If true, allow labels to be overwritten, otherwise reject label updates that overwrite existing labels.") + cmd.Flags().BoolVar(&o.list, "list", o.list, "If true, display the labels for a given resource.") + cmd.Flags().BoolVar(&o.local, "local", o.local, "If true, label will NOT contact api-server but run locally.") + cmd.Flags().StringVar(&o.fieldSelector, "field-selector", o.fieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + cmd.Flags().BoolVar(&o.all, "all", o.all, "Select all resources, in the namespace of the specified resource types") + cmd.Flags().BoolVarP(&o.allNamespaces, "all-namespaces", "A", o.allNamespaces, "If true, check the specified action in all namespaces.") + cmd.Flags().StringVar(&o.resourceVersion, "resource-version", o.resourceVersion, i18n.T("If non-empty, the labels update will only succeed if this is the current resource-version for the object. Only valid when specifying a single resource.")) + usage := "identifying the resource to update the labels" + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, usage) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-label") + cmdutil.AddLabelSelectorFlagVar(cmd, &o.selector) + + return cmd +} + +// Complete adapts from the command line args and factory to the data required. 
+func (o *LabelOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
+	var err error
+
+	o.RecordFlags.Complete(cmd)
+	o.Recorder, err = o.RecordFlags.ToRecorder()
+	if err != nil {
+		return err
+	}
+
+	o.outputFormat = cmdutil.GetFlagString(cmd, "output")
+	o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd)
+	if err != nil {
+		return err
+	}
+
+	o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) {
+		o.PrintFlags.NamePrintFlags.Operation = operation
+		// PrintFlagsWithDryRunStrategy must be done after NamePrintFlags.Operation is set
+		cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy)
+		return o.PrintFlags.ToPrinter()
+	}
+
+	resources, labelArgs, err := cmdutil.GetResourcesAndPairs(args, "label")
+	if err != nil {
+		return err
+	}
+	o.resources = resources
+	o.newLabels, o.removeLabels, err = parseLabels(labelArgs)
+	if err != nil {
+		return err
+	}
+
+	if o.list && len(o.outputFormat) > 0 {
+		return fmt.Errorf("--list and --output may not be specified together")
+	}
+
+	o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace()
+	if err != nil && !(o.local && clientcmd.IsEmptyConfig(err)) {
+		return err
+	}
+	o.builder = f.NewBuilder()
+	o.unstructuredClientForMapping = f.UnstructuredClientForMapping
+
+	return nil
+}
+
+// Validate checks the LabelOptions to see if there is sufficient information to run the command.
+func (o *LabelOptions) Validate() error {
+	if o.all && len(o.selector) > 0 {
+		return fmt.Errorf("cannot set --all and --selector at the same time")
+	}
+	if o.all && len(o.fieldSelector) > 0 {
+		return fmt.Errorf("cannot set --all and --field-selector at the same time")
+	}
+	if o.local {
+		if o.dryRunStrategy == cmdutil.DryRunServer {
+			return fmt.Errorf("cannot specify --local and --dry-run=server - did you mean --dry-run=client?")
+		}
+		if len(o.resources) > 0 {
+			return fmt.Errorf("can only use local files by -f pod.yaml or --filename=pod.json when --local=true is set")
+		}
+		if cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) {
+			return fmt.Errorf("one or more files must be specified as -f pod.yaml or --filename=pod.json")
+		}
+	} else {
+		if len(o.resources) < 1 && cmdutil.IsFilenameSliceEmpty(o.FilenameOptions.Filenames, o.FilenameOptions.Kustomize) {
+			return fmt.Errorf("one or more resources must be specified as <resource> <name> or <resource>/<name>")
+		}
+	}
+	if len(o.newLabels) < 1 && len(o.removeLabels) < 1 && !o.list {
+		return fmt.Errorf("at least one label update is required")
+	}
+	return nil
+}
+
+// RunLabel does the work
+func (o *LabelOptions) RunLabel() error {
+	b := o.builder.
+		Unstructured().
+		LocalParam(o.local).
+		ContinueOnError().
+		NamespaceParam(o.namespace).DefaultNamespace().
+		FilenameParam(o.enforceNamespace, &o.FilenameOptions).
+		Flatten()
+
+	if !o.local {
+		b = b.LabelSelectorParam(o.selector).
+			FieldSelectorParam(o.fieldSelector).
+			AllNamespaces(o.allNamespaces).
+			ResourceTypeOrNameArgs(o.all, o.resources...).
+ Latest() + } + + one := false + r := b.Do().IntoSingleItemImplied(&one) + if err := r.Err(); err != nil { + return err + } + + // only apply resource version locking on a single resource + if !one && len(o.resourceVersion) > 0 { + return fmt.Errorf("--resource-version may only be used with a single resource") + } + + // TODO: support bulk generic output a la Get + return r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + + var outputObj runtime.Object + var dataChangeMsg string + obj := info.Object + + if len(o.resourceVersion) != 0 { + // ensure resourceVersion is always sent in the patch by clearing it from the starting JSON + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion("") + } + + oldData, err := json.Marshal(obj) + if err != nil { + return err + } + if o.dryRunStrategy == cmdutil.DryRunClient || o.local || o.list { + err = labelFunc(obj, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels) + if err != nil { + return err + } + newObj, err := json.Marshal(obj) + if err != nil { + return err + } + dataChangeMsg = updateDataChangeMsg(oldData, newObj, o.overwrite) + outputObj = info.Object + } else { + name, namespace := info.Name, info.Namespace + if err != nil { + return err + } + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + for _, label := range o.removeLabels { + if _, ok := accessor.GetLabels()[label]; !ok { + fmt.Fprintf(o.Out, "label %q not found.\n", label) + } + } + + if err := labelFunc(obj, o.overwrite, o.resourceVersion, o.newLabels, o.removeLabels); err != nil { + return err + } + if err := o.Recorder.Record(obj); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } + newObj, err := json.Marshal(obj) + if err != nil { + return err + } + dataChangeMsg = updateDataChangeMsg(oldData, newObj, o.overwrite) + patchBytes, err := jsonpatch.CreateMergePatch(oldData, newObj) + createdPatch := err == nil + if err != nil { + klog.V(2).Infof("couldn't compute patch: %v", err) + } + + mapping := info.ResourceMapping() + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + helper := resource.NewHelper(client, mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). 
+ WithFieldManager(o.fieldManager) + + if createdPatch { + outputObj, err = helper.Patch(namespace, name, types.MergePatchType, patchBytes, nil) + } else { + outputObj, err = helper.Replace(namespace, name, false, obj) + } + if err != nil { + return err + } + } + + if o.list { + accessor, err := meta.Accessor(outputObj) + if err != nil { + return err + } + + indent := "" + if !one { + indent = " " + gvks, _, err := unstructuredscheme.NewUnstructuredObjectTyper().ObjectKinds(info.Object) + if err != nil { + return err + } + fmt.Fprintf(o.Out, "Listing labels for %s.%s/%s:\n", gvks[0].Kind, gvks[0].Group, info.Name) + } + for k, v := range accessor.GetLabels() { + fmt.Fprintf(o.Out, "%s%s=%s\n", indent, k, v) + } + + return nil + } + + printer, err := o.ToPrinter(dataChangeMsg) + if err != nil { + return err + } + return printer.PrintObj(info.Object, o.Out) + }) +} + +func updateDataChangeMsg(oldObj []byte, newObj []byte, overwrite bool) string { + msg := MsgNotLabeled + if !reflect.DeepEqual(oldObj, newObj) { + msg = MsgLabeled + if !overwrite && len(newObj) < len(oldObj) { + msg = MsgUnLabeled + } + } + return msg +} + +func validateNoOverwrites(accessor metav1.Object, labels map[string]string) error { + allErrs := []error{} + for key, value := range labels { + if currValue, found := accessor.GetLabels()[key]; found && currValue != value { + allErrs = append(allErrs, fmt.Errorf("'%s' already has a value (%s), and --overwrite is false", key, currValue)) + } + } + return utilerrors.NewAggregate(allErrs) +} + +func parseLabels(spec []string) (map[string]string, []string, error) { + labels := map[string]string{} + var remove []string + for _, labelSpec := range spec { + if strings.Contains(labelSpec, "=") { + parts := strings.Split(labelSpec, "=") + if len(parts) != 2 { + return nil, nil, fmt.Errorf("invalid label spec: %v", labelSpec) + } + if errs := validation.IsValidLabelValue(parts[1]); len(errs) != 0 { + return nil, nil, fmt.Errorf("invalid label value: %q: %s", labelSpec, strings.Join(errs, ";")) + } + labels[parts[0]] = parts[1] + } else if strings.HasSuffix(labelSpec, "-") { + remove = append(remove, labelSpec[:len(labelSpec)-1]) + } else { + return nil, nil, fmt.Errorf("unknown label spec: %v", labelSpec) + } + } + for _, removeLabel := range remove { + if _, found := labels[removeLabel]; found { + return nil, nil, fmt.Errorf("can not both modify and remove a label in the same command") + } + } + return labels, remove, nil +} + +func labelFunc(obj runtime.Object, overwrite bool, resourceVersion string, labels map[string]string, remove []string) error { + accessor, err := meta.Accessor(obj) + if err != nil { + return err + } + if !overwrite { + if err := validateNoOverwrites(accessor, labels); err != nil { + return err + } + } + + objLabels := accessor.GetLabels() + if objLabels == nil { + objLabels = make(map[string]string) + } + + for key, value := range labels { + objLabels[key] = value + } + for _, label := range remove { + delete(objLabels, label) + } + accessor.SetLabels(objLabels) + + if len(resourceVersion) != 0 { + accessor.SetResourceVersion(resourceVersion) + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/patch/patch.go b/vendor/k8s.io/kubectl/pkg/cmd/patch/patch.go new file mode 100644 index 000000000000..be303a53b53d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/patch/patch.go @@ -0,0 +1,364 @@ +/* +Copyright 2014 The Kubernetes Authors. 
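For reference, parseLabels above gives the label arguments their grammar: "key=value" adds, a trailing "-" removes, and one argument may not do both. A behavioral sketch (standalone re-implementation for illustration, not the vendored function):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // splitLabelSpecs mimics parseLabels: "k=v" entries become additions,
    // "k-" entries become removals, anything else is rejected.
    func splitLabelSpecs(specs []string) (map[string]string, []string, error) {
    	add := map[string]string{}
    	var remove []string
    	for _, s := range specs {
    		switch {
    		case strings.Contains(s, "="):
    			parts := strings.Split(s, "=")
    			if len(parts) != 2 {
    				return nil, nil, fmt.Errorf("invalid label spec: %v", s)
    			}
    			add[parts[0]] = parts[1]
    		case strings.HasSuffix(s, "-"):
    			remove = append(remove, strings.TrimSuffix(s, "-"))
    		default:
    			return nil, nil, fmt.Errorf("unknown label spec: %v", s)
    		}
    	}
    	return add, remove, nil
    }

    func main() {
    	add, remove, _ := splitLabelSpecs([]string{"status=unhealthy", "bar-"})
    	fmt.Println(add, remove) // map[status:unhealthy] [bar]
    }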
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package patch + +import ( + "fmt" + "os" + "reflect" + "strings" + + jsonpatch "github.com/evanphx/json-patch" + "github.com/pkg/errors" + "github.com/spf13/cobra" + "k8s.io/klog/v2" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/strategicpatch" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/tools/clientcmd" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/completion" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/slice" + "k8s.io/kubectl/pkg/util/templates" +) + +var patchTypes = map[string]types.PatchType{"json": types.JSONPatchType, "merge": types.MergePatchType, "strategic": types.StrategicMergePatchType} + +// PatchOptions is the start of the data required to perform the operation. As new fields are added, add them here instead of +// referencing the cmd.Flags() +type PatchOptions struct { + resource.FilenameOptions + + RecordFlags *genericclioptions.RecordFlags + PrintFlags *genericclioptions.PrintFlags + ToPrinter func(string) (printers.ResourcePrinter, error) + Recorder genericclioptions.Recorder + + Local bool + PatchType string + Patch string + PatchFile string + Subresource string + + namespace string + enforceNamespace bool + dryRunStrategy cmdutil.DryRunStrategy + outputFormat string + args []string + builder *resource.Builder + unstructuredClientForMapping func(mapping *meta.RESTMapping) (resource.RESTClient, error) + fieldManager string + + genericiooptions.IOStreams +} + +var ( + patchLong = templates.LongDesc(i18n.T(` + Update fields of a resource using strategic merge patch, a JSON merge patch, or a JSON patch. + + JSON and YAML formats are accepted. 
+ + Note: Strategic merge patch is not supported for custom resources.`)) + + patchExample = templates.Examples(i18n.T(` + # Partially update a node using a strategic merge patch, specifying the patch as JSON + kubectl patch node k8s-node-1 -p '{"spec":{"unschedulable":true}}' + + # Partially update a node using a strategic merge patch, specifying the patch as YAML + kubectl patch node k8s-node-1 -p $'spec:\n unschedulable: true' + + # Partially update a node identified by the type and name specified in "node.json" using strategic merge patch + kubectl patch -f node.json -p '{"spec":{"unschedulable":true}}' + + # Update a container's image; spec.containers[*].name is required because it's a merge key + kubectl patch pod valid-pod -p '{"spec":{"containers":[{"name":"kubernetes-serve-hostname","image":"new image"}]}}' + + # Update a container's image using a JSON patch with positional arrays + kubectl patch pod valid-pod --type='json' -p='[{"op": "replace", "path": "/spec/containers/0/image", "value":"new image"}]' + + # Update a deployment's replicas through the 'scale' subresource using a merge patch + kubectl patch deployment nginx-deployment --subresource='scale' --type='merge' -p '{"spec":{"replicas":2}}'`)) +) + +var supportedSubresources = []string{"status", "scale"} + +func NewPatchOptions(ioStreams genericiooptions.IOStreams) *PatchOptions { + return &PatchOptions{ + RecordFlags: genericclioptions.NewRecordFlags(), + Recorder: genericclioptions.NoopRecorder{}, + PrintFlags: genericclioptions.NewPrintFlags("patched").WithTypeSetter(scheme.Scheme), + IOStreams: ioStreams, + } +} + +func NewCmdPatch(f cmdutil.Factory, ioStreams genericiooptions.IOStreams) *cobra.Command { + o := NewPatchOptions(ioStreams) + + cmd := &cobra.Command{ + Use: "patch (-f FILENAME | TYPE NAME) [-p PATCH|--patch-file FILE]", + DisableFlagsInUseLine: true, + Short: i18n.T("Update fields of a resource"), + Long: patchLong, + Example: patchExample, + ValidArgsFunction: completion.ResourceTypeAndNameCompletionFunc(f), + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.RunPatch()) + }, + } + + o.RecordFlags.AddFlags(cmd) + o.PrintFlags.AddFlags(cmd) + + cmd.Flags().StringVarP(&o.Patch, "patch", "p", "", "The patch to be applied to the resource JSON file.") + cmd.Flags().StringVar(&o.PatchFile, "patch-file", "", "A file containing a patch to be applied to the resource.") + cmd.Flags().StringVar(&o.PatchType, "type", "strategic", fmt.Sprintf("The type of patch being provided; one of %v", sets.StringKeySet(patchTypes).List())) + cmdutil.AddDryRunFlag(cmd) + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to update") + cmd.Flags().BoolVar(&o.Local, "local", o.Local, "If true, patch will operate on the content of the file, not the server-side resource.") + cmdutil.AddFieldManagerFlagVar(cmd, &o.fieldManager, "kubectl-patch") + cmdutil.AddSubresourceFlags(cmd, &o.Subresource, "If specified, patch will operate on the subresource of the requested object.", supportedSubresources...) 
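The three values accepted by --type, registered just above via the patchTypes map, correspond to distinct patch semantics; for quick reference (payloads illustrative):

    --type=strategic (default)
        -p '{"spec":{"unschedulable":true}}'
        Kubernetes-aware merge that honors merge keys (e.g. a container's name);
        not available for custom resources.
    --type=merge
        -p '{"spec":{"replicas":2}}'
        RFC 7386 JSON merge patch; lists are replaced wholesale.
    --type=json
        -p '[{"op":"replace","path":"/spec/containers/0/image","value":"new image"}]'
        RFC 6902 JSON patch with positional operations.

With --local, RunPatch (further down) applies the same payloads client-side through getPatchedJSON instead of calling the API server.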
+ + return cmd +} + +func (o *PatchOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + var err error + o.RecordFlags.Complete(cmd) + o.Recorder, err = o.RecordFlags.ToRecorder() + if err != nil { + return err + } + + o.outputFormat = cmdutil.GetFlagString(cmd, "output") + o.dryRunStrategy, err = cmdutil.GetDryRunStrategy(cmd) + if err != nil { + return err + } + + cmdutil.PrintFlagsWithDryRunStrategy(o.PrintFlags, o.dryRunStrategy) + o.ToPrinter = func(operation string) (printers.ResourcePrinter, error) { + o.PrintFlags.NamePrintFlags.Operation = operation + + return o.PrintFlags.ToPrinter() + } + + o.namespace, o.enforceNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil && !(o.Local && clientcmd.IsEmptyConfig(err)) { + return err + } + o.args = args + o.builder = f.NewBuilder() + o.unstructuredClientForMapping = f.UnstructuredClientForMapping + + return nil +} + +func (o *PatchOptions) Validate() error { + if len(o.Patch) > 0 && len(o.PatchFile) > 0 { + return fmt.Errorf("cannot specify --patch and --patch-file together") + } + if len(o.Patch) == 0 && len(o.PatchFile) == 0 { + return fmt.Errorf("must specify --patch or --patch-file containing the contents of the patch") + } + if o.Local && len(o.args) != 0 { + return fmt.Errorf("cannot specify --local and server resources") + } + if o.Local && o.dryRunStrategy == cmdutil.DryRunServer { + return fmt.Errorf("cannot specify --local and --dry-run=server - did you mean --dry-run=client?") + } + if len(o.PatchType) != 0 { + if _, ok := patchTypes[strings.ToLower(o.PatchType)]; !ok { + return fmt.Errorf("--type must be one of %v, not %q", sets.StringKeySet(patchTypes).List(), o.PatchType) + } + } + if len(o.Subresource) > 0 && !slice.ContainsString(supportedSubresources, o.Subresource, nil) { + return fmt.Errorf("invalid subresource value: %q. Must be one of %v", o.Subresource, supportedSubresources) + } + return nil +} + +func (o *PatchOptions) RunPatch() error { + patchType := types.StrategicMergePatchType + if len(o.PatchType) != 0 { + patchType = patchTypes[strings.ToLower(o.PatchType)] + } + + var patchBytes []byte + if len(o.PatchFile) > 0 { + var err error + patchBytes, err = os.ReadFile(o.PatchFile) + if err != nil { + return fmt.Errorf("unable to read patch file: %v", err) + } + } else { + patchBytes = []byte(o.Patch) + } + + patchBytes, err := yaml.ToJSON(patchBytes) + if err != nil { + return fmt.Errorf("unable to parse %q: %v", o.Patch, err) + } + + r := o.builder. + Unstructured(). + ContinueOnError(). + LocalParam(o.Local). + NamespaceParam(o.namespace).DefaultNamespace(). + FilenameParam(o.enforceNamespace, &o.FilenameOptions). + Subresource(o.Subresource). + ResourceTypeOrNameArgs(false, o.args...). + Flatten(). + Do() + err = r.Err() + if err != nil { + return err + } + + count := 0 + err = r.Visit(func(info *resource.Info, err error) error { + if err != nil { + return err + } + count++ + name, namespace := info.Name, info.Namespace + + if !o.Local && o.dryRunStrategy != cmdutil.DryRunClient { + mapping := info.ResourceMapping() + client, err := o.unstructuredClientForMapping(mapping) + if err != nil { + return err + } + + helper := resource. + NewHelper(client, mapping). + DryRun(o.dryRunStrategy == cmdutil.DryRunServer). + WithFieldManager(o.fieldManager). 
+ WithSubresource(o.Subresource) + patchedObj, err := helper.Patch(namespace, name, patchType, patchBytes, nil) + if err != nil { + if apierrors.IsUnsupportedMediaType(err) { + return errors.Wrap(err, fmt.Sprintf("%s is not supported by %s", patchType, mapping.GroupVersionKind)) + } + return err + } + + didPatch := !reflect.DeepEqual(info.Object, patchedObj) + + // if the recorder makes a change, compute and create another patch + if mergePatch, err := o.Recorder.MakeRecordMergePatch(patchedObj); err != nil { + klog.V(4).Infof("error recording current command: %v", err) + } else if len(mergePatch) > 0 { + if recordedObj, err := helper.Patch(namespace, name, types.MergePatchType, mergePatch, nil); err != nil { + klog.V(4).Infof("error recording reason: %v", err) + } else { + patchedObj = recordedObj + } + } + + printer, err := o.ToPrinter(patchOperation(didPatch)) + if err != nil { + return err + } + return printer.PrintObj(patchedObj, o.Out) + } + + originalObjJS, err := runtime.Encode(unstructured.UnstructuredJSONScheme, info.Object) + if err != nil { + return err + } + + originalPatchedObjJS, err := getPatchedJSON(patchType, originalObjJS, patchBytes, info.Object.GetObjectKind().GroupVersionKind(), scheme.Scheme) + if err != nil { + return err + } + + targetObj, err := runtime.Decode(unstructured.UnstructuredJSONScheme, originalPatchedObjJS) + if err != nil { + return err + } + + didPatch := !reflect.DeepEqual(info.Object, targetObj) + printer, err := o.ToPrinter(patchOperation(didPatch)) + if err != nil { + return err + } + return printer.PrintObj(targetObj, o.Out) + }) + if err != nil { + return err + } + if count == 0 { + return fmt.Errorf("no objects passed to patch") + } + return nil +} + +func getPatchedJSON(patchType types.PatchType, originalJS, patchJS []byte, gvk schema.GroupVersionKind, creater runtime.ObjectCreater) ([]byte, error) { + switch patchType { + case types.JSONPatchType: + patchObj, err := jsonpatch.DecodePatch(patchJS) + if err != nil { + return nil, err + } + bytes, err := patchObj.Apply(originalJS) + // TODO: This is pretty hacky, we need a better structured error from the json-patch + if err != nil && strings.Contains(err.Error(), "doc is missing key") { + msg := err.Error() + ix := strings.Index(msg, "key:") + key := msg[ix+5:] + return bytes, fmt.Errorf("Object to be patched is missing field (%s)", key) + } + return bytes, err + + case types.MergePatchType: + return jsonpatch.MergePatch(originalJS, patchJS) + + case types.StrategicMergePatchType: + // get a typed object for this GVK if we need to apply a strategic merge patch + obj, err := creater.New(gvk) + if err != nil { + return nil, fmt.Errorf("strategic merge patch is not supported for %s locally, try --type merge", gvk.String()) + } + return strategicpatch.StrategicMergePatch(originalJS, patchJS, obj) + + default: + // only here as a safety net - go-restful filters content-type + return nil, fmt.Errorf("unknown Content-Type header for patch: %v", patchType) + } +} + +func patchOperation(didPatch bool) string { + if didPatch { + return "patched" + } + return "patched (no change)" +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/OWNERS b/vendor/k8s.io/kubectl/pkg/explain/OWNERS new file mode 100644 index 000000000000..cb873612b13e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse +reviewers: + - apelisse diff --git a/vendor/k8s.io/kubectl/pkg/explain/explain.go 
b/vendor/k8s.io/kubectl/pkg/explain/explain.go new file mode 100644 index 000000000000..ccc7b3a9abc7 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/explain.go @@ -0,0 +1,168 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import ( + "fmt" + "io" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/util/jsonpath" + "k8s.io/kube-openapi/pkg/util/proto" +) + +type fieldsPrinter interface { + PrintFields(proto.Schema) error +} + +// jsonPathParse gets back the inner list of nodes we want to work with +func jsonPathParse(in string) ([]jsonpath.Node, error) { + // Remove trailing period just in case + in = strings.TrimSuffix(in, ".") + + // Define initial jsonpath Parser + jpp, err := jsonpath.Parse("user", "{."+in+"}") + if err != nil { + return nil, err + } + + // Because of the way the jsonpath library works, the schema of the parser is [][]NodeList + // meaning we need to get the outer node list, make sure it's only length 1, then get the inner node + // list, and only then can we look at the individual nodes themselves. + outerNodeList := jpp.Root.Nodes + if len(outerNodeList) != 1 { + return nil, fmt.Errorf("must pass in 1 jsonpath string") + } + + // The root node is always a list node so this type assertion is safe + return outerNodeList[0].(*jsonpath.ListNode).Nodes, nil +} + +// SplitAndParseResourceRequest separates the users input into a model and fields +func SplitAndParseResourceRequest(inResource string, mapper meta.RESTMapper) (schema.GroupVersionResource, []string, error) { + inResourceNodeList, err := jsonPathParse(inResource) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + if inResourceNodeList[0].Type() != jsonpath.NodeField { + return schema.GroupVersionResource{}, nil, fmt.Errorf("invalid jsonpath syntax, first node must be field node") + } + resource := inResourceNodeList[0].(*jsonpath.FieldNode).Value + gvr, err := mapper.ResourceFor(schema.GroupVersionResource{Resource: resource}) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + var fieldsPath []string + for _, node := range inResourceNodeList[1:] { + if node.Type() != jsonpath.NodeField { + return schema.GroupVersionResource{}, nil, fmt.Errorf("invalid jsonpath syntax, all nodes must be field nodes") + } + fieldsPath = append(fieldsPath, node.(*jsonpath.FieldNode).Value) + } + + return gvr, fieldsPath, nil +} + +// SplitAndParseResourceRequestWithMatchingPrefix separates the users input into a model and fields +// while selecting gvr whose (resource, group) prefix matches the resource +func SplitAndParseResourceRequestWithMatchingPrefix(inResource string, mapper meta.RESTMapper) (gvr schema.GroupVersionResource, fieldsPath []string, err error) { + inResourceNodeList, err := jsonPathParse(inResource) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + // Get resource from first node of jsonpath + if inResourceNodeList[0].Type() != 
jsonpath.NodeField { + return schema.GroupVersionResource{}, nil, fmt.Errorf("invalid jsonpath syntax, first node must be field node") + } + resource := inResourceNodeList[0].(*jsonpath.FieldNode).Value + + gvrs, err := mapper.ResourcesFor(schema.GroupVersionResource{Resource: resource}) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + for _, gvrItem := range gvrs { + // Find first gvr whose gr prefixes requested resource + groupResource := gvrItem.GroupResource().String() + if strings.HasPrefix(inResource, groupResource) { + resourceSuffix := inResource[len(groupResource):] + var fieldsPath []string + if len(resourceSuffix) > 0 { + // Define another jsonpath Parser for the resource suffix + resourceSuffixNodeList, err := jsonPathParse(resourceSuffix) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + if len(resourceSuffixNodeList) > 0 { + nodeList := resourceSuffixNodeList[1:] + for _, node := range nodeList { + if node.Type() != jsonpath.NodeField { + return schema.GroupVersionResource{}, nil, fmt.Errorf("invalid jsonpath syntax, first node must be field node") + } + fieldsPath = append(fieldsPath, node.(*jsonpath.FieldNode).Value) + } + } + } + return gvrItem, fieldsPath, nil + } + } + + // If no match, take the first (the highest priority) gvr + fieldsPath = []string{} + if len(gvrs) > 0 { + gvr = gvrs[0] + + fieldsPathNodeList, err := jsonPathParse(inResource) + if err != nil { + return schema.GroupVersionResource{}, nil, err + } + + for _, node := range fieldsPathNodeList[1:] { + if node.Type() != jsonpath.NodeField { + return schema.GroupVersionResource{}, nil, fmt.Errorf("invalid jsonpath syntax, first node must be field node") + } + fieldsPath = append(fieldsPath, node.(*jsonpath.FieldNode).Value) + } + } + + return gvr, fieldsPath, nil +} + +// PrintModelDescription prints the description of a specific model or dot path. +// If recursive, all components nested within the fields of the schema will be +// printed. +func PrintModelDescription(fieldsPath []string, w io.Writer, schema proto.Schema, gvk schema.GroupVersionKind, recursive bool) error { + fieldName := "" + if len(fieldsPath) != 0 { + fieldName = fieldsPath[len(fieldsPath)-1] + } + + // Go down the fieldsPath to find what we're trying to explain + schema, err := LookupSchemaForField(schema, fieldsPath) + if err != nil { + return err + } + b := fieldsPrinterBuilder{Recursive: recursive} + f := &Formatter{Writer: w, Wrap: 80} + return PrintModel(fieldName, f, b, schema, gvk) +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/field_lookup.go b/vendor/k8s.io/kubectl/pkg/explain/field_lookup.go new file mode 100644 index 000000000000..9f96c507269d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/field_lookup.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
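To make the two splitters above concrete, a sketch against apimachinery's in-memory mapper (test-style wiring; kubectl itself derives the mapper from API discovery, and the expected values in the comment are what the code above implies, worth verifying):

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/api/meta"
    	"k8s.io/apimachinery/pkg/runtime/schema"
    	"k8s.io/kubectl/pkg/explain"
    )

    func main() {
    	// In-memory REST mapper with only the core/v1 Pod kind registered.
    	mapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{{Version: "v1"}})
    	mapper.Add(schema.GroupVersionKind{Version: "v1", Kind: "Pod"}, meta.RESTScopeNamespace)

    	gvr, fields, err := explain.SplitAndParseResourceRequest("pods.spec.containers", mapper)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(gvr.Resource, fields) // pods [spec containers]
    }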
+*/ + +package explain + +import ( + "fmt" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +// fieldLookup walks through a schema by following a path, and returns +// the final schema. +type fieldLookup struct { + // Path to walk + Path []string + + // Return information: Schema found, or error. + Schema proto.Schema + Error error +} + +// SaveLeafSchema is used to detect if we are done walking the path, and +// saves the schema as a match. +func (f *fieldLookup) SaveLeafSchema(schema proto.Schema) bool { + if len(f.Path) != 0 { + return false + } + + f.Schema = schema + + return true +} + +// VisitArray is mostly a passthrough. +func (f *fieldLookup) VisitArray(a *proto.Array) { + if f.SaveLeafSchema(a) { + return + } + + // Passthrough arrays. + a.SubType.Accept(f) +} + +// VisitMap is mostly a passthrough. +func (f *fieldLookup) VisitMap(m *proto.Map) { + if f.SaveLeafSchema(m) { + return + } + + // Passthrough maps. + m.SubType.Accept(f) +} + +// VisitPrimitive stops the operation and returns itself as the found +// schema, even if it had more path to walk. +func (f *fieldLookup) VisitPrimitive(p *proto.Primitive) { + // Even if Path is not empty (we're not expecting a leaf), + // return that primitive. + f.Schema = p +} + +// VisitKind unstacks fields as it finds them. +func (f *fieldLookup) VisitKind(k *proto.Kind) { + if f.SaveLeafSchema(k) { + return + } + + subSchema, ok := k.Fields[f.Path[0]] + if !ok { + f.Error = fmt.Errorf("field %q does not exist", f.Path[0]) + return + } + + f.Path = f.Path[1:] + subSchema.Accept(f) +} + +func (f *fieldLookup) VisitArbitrary(a *proto.Arbitrary) { + f.Schema = a +} + +// VisitReference is mostly a passthrough. +func (f *fieldLookup) VisitReference(r proto.Reference) { + if f.SaveLeafSchema(r) { + return + } + + // Passthrough references. + r.SubSchema().Accept(f) +} + +// LookupSchemaForField looks for the schema of a given path in a base schema. +func LookupSchemaForField(schema proto.Schema, path []string) (proto.Schema, error) { + f := &fieldLookup{Path: path} + schema.Accept(f) + return f.Schema, f.Error +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/fields_printer.go b/vendor/k8s.io/kubectl/pkg/explain/fields_printer.go new file mode 100644 index 000000000000..bb25241a4527 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/fields_printer.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import "k8s.io/kube-openapi/pkg/util/proto" + +// indentDesc is the level of indentation for descriptions. +const indentDesc = 2 + +// regularFieldsPrinter prints fields with their type and description. +type regularFieldsPrinter struct { + Writer *Formatter + Error error +} + +var _ proto.SchemaVisitor = ®ularFieldsPrinter{} +var _ fieldsPrinter = ®ularFieldsPrinter{} + +// VisitArray prints a Array type. It is just a passthrough. +func (f *regularFieldsPrinter) VisitArray(a *proto.Array) { + a.SubType.Accept(f) +} + +// VisitKind prints a Kind type. 
It prints each key in the kind, with +// the type, the required flag, and the description. +func (f *regularFieldsPrinter) VisitKind(k *proto.Kind) { + for _, key := range k.Keys() { + v := k.Fields[key] + required := "" + if k.IsRequired(key) { + required = " -required-" + } + + if err := f.Writer.Write("%s\t<%s>%s", key, GetTypeName(v), required); err != nil { + f.Error = err + return + } + if err := f.Writer.Indent(indentDesc).WriteWrapped("%s", v.GetDescription()); err != nil { + f.Error = err + return + } + if err := f.Writer.Write(""); err != nil { + f.Error = err + return + } + } +} + +// VisitMap prints a Map type. It is just a passthrough. +func (f *regularFieldsPrinter) VisitMap(m *proto.Map) { + m.SubType.Accept(f) +} + +// VisitPrimitive prints a Primitive type. It stops the recursion. +func (f *regularFieldsPrinter) VisitPrimitive(p *proto.Primitive) { + // Nothing to do. Shouldn't really happen. +} + +// VisitReference prints a Reference type. It is just a passthrough. +func (f *regularFieldsPrinter) VisitReference(r proto.Reference) { + r.SubSchema().Accept(f) +} + +// PrintFields will write the types from schema. +func (f *regularFieldsPrinter) PrintFields(schema proto.Schema) error { + schema.Accept(f) + return f.Error +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/fields_printer_builder.go b/vendor/k8s.io/kubectl/pkg/explain/fields_printer_builder.go new file mode 100644 index 000000000000..8a2fc045eba7 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/fields_printer_builder.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +// fieldsPrinterBuilder builds either a regularFieldsPrinter or a +// recursiveFieldsPrinter based on the argument. +type fieldsPrinterBuilder struct { + Recursive bool +} + +// BuildFieldsPrinter builds the appropriate fieldsPrinter. +func (f fieldsPrinterBuilder) BuildFieldsPrinter(writer *Formatter) fieldsPrinter { + if f.Recursive { + return &recursiveFieldsPrinter{ + Writer: writer, + } + } + + return ®ularFieldsPrinter{ + Writer: writer, + } +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/formatter.go b/vendor/k8s.io/kubectl/pkg/explain/formatter.go new file mode 100644 index 000000000000..9f94f1206f4c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/formatter.go @@ -0,0 +1,178 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
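The Formatter implemented next is the whole layout engine behind these printers; a usage sketch (wrap width chosen arbitrarily):

    package main

    import (
    	"os"

    	"k8s.io/kubectl/pkg/explain"
    )

    func main() {
    	f := &explain.Formatter{Writer: os.Stdout, Wrap: 40}
    	f.Write("FIELDS:") // errors ignored for brevity
    	// Indent returns a copy, so nesting levels compose without mutating f.
    	f.Indent(3).WriteWrapped("a long description that gets wrapped at forty columns, indent included")
    }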
+*/ + +package explain + +import ( + "fmt" + "io" + "regexp" + "strings" +) + +// Formatter helps you write with indentation, and can wrap text as needed. +type Formatter struct { + IndentLevel int + Wrap int + Writer io.Writer +} + +// Indent creates a new Formatter that will indent the code by that much more. +func (f Formatter) Indent(indent int) *Formatter { + f.IndentLevel = f.IndentLevel + indent + return &f +} + +// Write writes a string with the indentation set for the +// Formatter. This is not wrapping text. +func (f *Formatter) Write(str string, a ...interface{}) error { + // Don't indent empty lines + if str == "" { + _, err := io.WriteString(f.Writer, "\n") + return err + } + + indent := "" + for i := 0; i < f.IndentLevel; i++ { + indent = indent + " " + } + + if len(a) > 0 { + str = fmt.Sprintf(str, a...) + } + _, err := io.WriteString(f.Writer, indent+str+"\n") + return err +} + +// WriteWrapped writes a string with the indentation set for the +// Formatter, and wraps as needed. +func (f *Formatter) WriteWrapped(str string, a ...interface{}) error { + if f.Wrap == 0 { + return f.Write(str, a...) + } + text := fmt.Sprintf(str, a...) + strs := wrapString(text, f.Wrap-f.IndentLevel) + for _, substr := range strs { + if err := f.Write(substr); err != nil { + return err + } + } + return nil +} + +type line struct { + wrap int + words []string +} + +func (l *line) String() string { + return strings.Join(l.words, " ") +} + +func (l *line) Empty() bool { + return len(l.words) == 0 +} + +func (l *line) Len() int { + return len(l.String()) +} + +// Add adds the word to the line, returns true if we could, false if we +// didn't have enough room. It's always possible to add to an empty line. +func (l *line) Add(word string) bool { + newLine := line{ + wrap: l.wrap, + words: append(l.words, word), + } + if newLine.Len() <= l.wrap || len(l.words) == 0 { + l.words = newLine.words + return true + } + return false +} + +var bullet = regexp.MustCompile(`^(\d+\.?|-|\*)\s`) + +func shouldStartNewLine(lastWord, str string) bool { + // preserve line breaks ending in : + if strings.HasSuffix(lastWord, ":") { + return true + } + + // preserve code blocks + if strings.HasPrefix(str, " ") { + return true + } + str = strings.TrimSpace(str) + // preserve empty lines + if len(str) == 0 { + return true + } + // preserve lines that look like they're starting lists + if bullet.MatchString(str) { + return true + } + // otherwise combine + return false +} + +func wrapString(str string, wrap int) []string { + wrapped := []string{} + l := line{wrap: wrap} + // track the last word added to the current line + lastWord := "" + flush := func() { + if !l.Empty() { + lastWord = "" + wrapped = append(wrapped, l.String()) + l = line{wrap: wrap} + } + } + + // iterate over the lines in the original description + for _, str := range strings.Split(str, "\n") { + // preserve code blocks and blockquotes as-is + if strings.HasPrefix(str, " ") { + flush() + wrapped = append(wrapped, str) + continue + } + + // preserve empty lines after the first line, since they can separate logical sections + if len(wrapped) > 0 && len(strings.TrimSpace(str)) == 0 { + flush() + wrapped = append(wrapped, "") + continue + } + + // flush if we should start a new line + if shouldStartNewLine(lastWord, str) { + flush() + } + words := strings.Fields(str) + for _, word := range words { + lastWord = word + if !l.Add(word) { + flush() + if !l.Add(word) { + panic("Couldn't add to empty line.") + } + } + } + } + flush() + return wrapped +} diff --git 
a/vendor/k8s.io/kubectl/pkg/explain/model_printer.go b/vendor/k8s.io/kubectl/pkg/explain/model_printer.go new file mode 100644 index 000000000000..8bd145dd20a6 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/model_printer.go @@ -0,0 +1,163 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + // fieldIndentLevel is the level of indentation for fields. + fieldIndentLevel = 3 + // descriptionIndentLevel is the level of indentation for the + // description. + descriptionIndentLevel = 5 +) + +// modelPrinter prints a schema in Writer. Its "Builder" will decide if +// it's recursive or not. +type modelPrinter struct { + Name string + Type string + Descriptions []string + Writer *Formatter + Builder fieldsPrinterBuilder + GVK schema.GroupVersionKind + Error error +} + +var _ proto.SchemaVisitor = &modelPrinter{} + +func (m *modelPrinter) PrintKindAndVersion() error { + if err := m.Writer.Write("KIND: %s", m.GVK.Kind); err != nil { + return err + } + return m.Writer.Write("VERSION: %s\n", m.GVK.GroupVersion()) +} + +// PrintDescription prints the description for a given schema. There +// might be multiple description, since we collect descriptions when we +// go through references, arrays and maps. +func (m *modelPrinter) PrintDescription(schema proto.Schema) error { + if err := m.Writer.Write("DESCRIPTION:"); err != nil { + return err + } + empty := true + for i, desc := range append(m.Descriptions, schema.GetDescription()) { + if desc == "" { + continue + } + empty = false + if i != 0 { + if err := m.Writer.Write(""); err != nil { + return err + } + } + if err := m.Writer.Indent(descriptionIndentLevel).WriteWrapped("%s", desc); err != nil { + return err + } + } + if empty { + return m.Writer.Indent(descriptionIndentLevel).WriteWrapped("") + } + return nil +} + +// VisitArray recurses inside the subtype, while collecting the type if +// not done yet, and the description. +func (m *modelPrinter) VisitArray(a *proto.Array) { + m.Descriptions = append(m.Descriptions, a.GetDescription()) + if m.Type == "" { + m.Type = GetTypeName(a) + } + a.SubType.Accept(m) +} + +// VisitKind prints a full resource with its fields. +func (m *modelPrinter) VisitKind(k *proto.Kind) { + if err := m.PrintKindAndVersion(); err != nil { + m.Error = err + return + } + + if m.Type == "" { + m.Type = GetTypeName(k) + } + if m.Name != "" { + m.Writer.Write("RESOURCE: %s <%s>\n", m.Name, m.Type) + } + + if err := m.PrintDescription(k); err != nil { + m.Error = err + return + } + if err := m.Writer.Write("\nFIELDS:"); err != nil { + m.Error = err + return + } + m.Error = m.Builder.BuildFieldsPrinter(m.Writer.Indent(fieldIndentLevel)).PrintFields(k) +} + +// VisitMap recurses inside the subtype, while collecting the type if +// not done yet, and the description. 
+func (m *modelPrinter) VisitMap(om *proto.Map) { + m.Descriptions = append(m.Descriptions, om.GetDescription()) + if m.Type == "" { + m.Type = GetTypeName(om) + } + om.SubType.Accept(m) +} + +// VisitPrimitive prints a field type and its description. +func (m *modelPrinter) VisitPrimitive(p *proto.Primitive) { + if err := m.PrintKindAndVersion(); err != nil { + m.Error = err + return + } + + if m.Type == "" { + m.Type = GetTypeName(p) + } + if err := m.Writer.Write("FIELD: %s <%s>\n", m.Name, m.Type); err != nil { + m.Error = err + return + } + m.Error = m.PrintDescription(p) +} + +func (m *modelPrinter) VisitArbitrary(a *proto.Arbitrary) { + if err := m.PrintKindAndVersion(); err != nil { + m.Error = err + return + } + + m.Error = m.PrintDescription(a) +} + +// VisitReference recurses inside the subtype, while collecting the description. +func (m *modelPrinter) VisitReference(r proto.Reference) { + m.Descriptions = append(m.Descriptions, r.GetDescription()) + r.SubSchema().Accept(m) +} + +// PrintModel prints the description of a schema in writer. +func PrintModel(name string, writer *Formatter, builder fieldsPrinterBuilder, schema proto.Schema, gvk schema.GroupVersionKind) error { + m := &modelPrinter{Name: name, Writer: writer, Builder: builder, GVK: gvk} + schema.Accept(m) + return m.Error +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/recursive_fields_printer.go b/vendor/k8s.io/kubectl/pkg/explain/recursive_fields_printer.go new file mode 100644 index 000000000000..6429a73264eb --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/recursive_fields_printer.go @@ -0,0 +1,81 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import "k8s.io/kube-openapi/pkg/util/proto" + +// indentPerLevel is the level of indentation for each field recursion. +const indentPerLevel = 3 + +// recursiveFieldsPrinter recursively prints all the fields for a given +// schema. +type recursiveFieldsPrinter struct { + Writer *Formatter + Error error +} + +var _ proto.SchemaVisitor = &recursiveFieldsPrinter{} +var _ fieldsPrinter = &recursiveFieldsPrinter{} +var visitedReferences = map[string]struct{}{} + +// VisitArray is just a passthrough. +func (f *recursiveFieldsPrinter) VisitArray(a *proto.Array) { + a.SubType.Accept(f) +} + +// VisitKind prints all its fields with their type, and then recurses +// inside each of these (pre-order). +func (f *recursiveFieldsPrinter) VisitKind(k *proto.Kind) { + for _, key := range k.Keys() { + v := k.Fields[key] + f.Writer.Write("%s\t<%s>", key, GetTypeName(v)) + subFields := &recursiveFieldsPrinter{ + Writer: f.Writer.Indent(indentPerLevel), + } + if err := subFields.PrintFields(v); err != nil { + f.Error = err + return + } + } +} + +// VisitMap is just a passthrough. +func (f *recursiveFieldsPrinter) VisitMap(m *proto.Map) { + m.SubType.Accept(f) +} + +// VisitPrimitive does nothing, since it doesn't have sub-fields. +func (f *recursiveFieldsPrinter) VisitPrimitive(p *proto.Primitive) { + // Nothing to do. 
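One subtlety worth noting here: the package-level visitedReferences map guards VisitReference just below. Each reference name is recorded before recursing into its sub-schema and deleted afterwards, so a schema that refers to itself, directly or through an array (like the ReferenceKind fixture in test-recursive-swagger.json later in this diff), is expanded once per branch instead of recursing forever. Because it is a shared, unsynchronized package-level map, this also assumes recursive printers are not run concurrently.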
+} + +// VisitReference is just a passthrough. +func (f *recursiveFieldsPrinter) VisitReference(r proto.Reference) { + if _, ok := visitedReferences[r.Reference()]; ok { + return + } + visitedReferences[r.Reference()] = struct{}{} + r.SubSchema().Accept(f) + delete(visitedReferences, r.Reference()) +} + +// PrintFields will recursively print all the fields for the given +// schema. +func (f *recursiveFieldsPrinter) PrintFields(schema proto.Schema) error { + schema.Accept(f) + return f.Error +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/test-recursive-swagger.json b/vendor/k8s.io/kubectl/pkg/explain/test-recursive-swagger.json new file mode 100644 index 000000000000..1ae79855d393 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/test-recursive-swagger.json @@ -0,0 +1,63 @@ +{ + "swagger": "2.0", + "info": { + "title": "Kubernetes", + "version": "v1.9.0" + }, + "paths": {}, + "definitions": { + "OneKind": { + "description": "OneKind has a short description", + "required": [ + "field1" + ], + "properties": { + "field1": { + "description": "This is first reference field", + "$ref": "#/definitions/ReferenceKind" + }, + "field2": { + "description": "This is other kind field with string and reference", + "$ref": "#/definitions/OtherKind" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "OneKind", + "version": "v2" + } + ] + }, + "ReferenceKind": { + "description": "This is reference Kind", + "properties": { + "referencefield": { + "description": "This is reference to itself.", + "$ref": "#/definitions/ReferenceKind" + }, + "referencesarray": { + "description": "This is an array of references", + "type": "array", + "items": { + "description": "This is reference object", + "$ref": "#/definitions/ReferenceKind" + } + } + } + }, + "OtherKind": { + "description": "This is other kind with string and reference fields", + "properties": { + "string": { + "description": "This string must be a string", + "type": "string" + }, + "reference": { + "description": "This is reference field.", + "$ref": "#/definitions/ReferenceKind" + } + } + } + } +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/test-swagger.json b/vendor/k8s.io/kubectl/pkg/explain/test-swagger.json new file mode 100644 index 000000000000..ba5fa1ff71c0 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/test-swagger.json @@ -0,0 +1,102 @@ +{ + "swagger": "2.0", + "info": { + "title": "Kubernetes", + "version": "v1.9.0" + }, + "paths": {}, + "definitions": { + "PrimitiveDef": { + "type": "string" + }, + "OneKind": { + "description": "OneKind has a short description", + "required": [ + "field1" + ], + "properties": { + "field1": { + "description": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nulla ut lacus ac enim vulputate imperdiet ac accumsan risus. Integer vel accumsan lectus. Praesent tempus nulla id tortor luctus, quis varius nulla laoreet. Ut orci nisi, suscipit id velit sed, blandit eleifend turpis. Curabitur tempus ante at lectus viverra, a mattis augue euismod. Morbi quam ligula, porttitor sit amet lacus non, interdum pulvinar tortor. 
Praesent accumsan risus et ipsum dictum, vel ullamcorper lorem egestas.", + "$ref": "#/definitions/OtherKind" + }, + "field2": { + "description": "This is an array of object of PrimitiveDef", + "type": "array", + "items": { + "description": "This is an object of PrimitiveDef", + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/PrimitiveDef" + } + } + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "OneKind", + "version": "v1" + } + ] + }, + "ControlCharacterKind": { + "description": "Control character %", + "properties": { + "field1": { + "description": "Control character %" + } + }, + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "ControlCharacterKind", + "version": "v1" + } + ] + }, + "OtherKind": { + "description": "This is another kind of Kind", + "required": [ + "string" + ], + "properties": { + "string": { + "description": "This string must be a string", + "type": "string" + }, + "int": { + "description": "This int must be an int", + "type": "integer" + }, + "array": { + "description": "This array must be an array of int", + "type": "array", + "items": { + "description": "This is an int in an array", + "type": "integer" + } + }, + "object": { + "description": "This is an object of string", + "type": "object", + "additionalProperties": { + "description": "this is a string in an object", + "type": "string" + } + }, + "primitive": { + "$ref": "#/definitions/PrimitiveDef" + } + } + }, + "CrdKind": { + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "CrdKind", + "version": "v1" + } + ] + } + } +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/typename.go b/vendor/k8s.io/kubectl/pkg/explain/typename.go new file mode 100644 index 000000000000..b9d71b69a5ca --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/typename.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package explain + +import ( + "fmt" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +// typeName finds the name of a schema +type typeName struct { + Name string +} + +var _ proto.SchemaVisitor = &typeName{} + +// VisitArray adds the [] prefix and recurses. +func (t *typeName) VisitArray(a *proto.Array) { + s := &typeName{} + a.SubType.Accept(s) + t.Name = fmt.Sprintf("[]%s", s.Name) +} + +// VisitKind just returns "Object". +func (t *typeName) VisitKind(k *proto.Kind) { + t.Name = "Object" +} + +// VisitMap adds the map[string] prefix and recurses. +func (t *typeName) VisitMap(m *proto.Map) { + s := &typeName{} + m.SubType.Accept(s) + t.Name = fmt.Sprintf("map[string]%s", s.Name) +} + +// VisitPrimitive returns the name of the primitive. +func (t *typeName) VisitPrimitive(p *proto.Primitive) { + t.Name = p.Type +} + +// VisitReference is just a passthrough. +func (t *typeName) VisitReference(r proto.Reference) { + r.SubSchema().Accept(t) +} + +// GetTypeName returns the type of a schema.
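Before the GetTypeName entry point just below, note how composite names assemble outside-in: each Visit* recurses into its SubType with a fresh typeName visitor and wraps the result. A hypothetical usage sketch, assuming the exported SubType/Type fields of the kube-openapi proto package and the vendored import path:

```go
package main

import (
	"fmt"

	"k8s.io/kube-openapi/pkg/util/proto"
	"k8s.io/kubectl/pkg/explain"
)

func main() {
	// A schema shaped like map[string][]integer, assembled by hand.
	s := &proto.Map{
		SubType: &proto.Array{
			SubType: &proto.Primitive{Type: "integer"},
		},
	}

	// VisitMap wraps "map[string]" around the name produced for its
	// subtype, which in turn wrapped "[]" around the primitive name.
	fmt.Println(explain.GetTypeName(s)) // map[string][]integer
}
```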
+func GetTypeName(schema proto.Schema) string { + t := &typeName{} + schema.Accept(t) + return t.Name +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/v2/explain.go b/vendor/k8s.io/kubectl/pkg/explain/v2/explain.go new file mode 100644 index 000000000000..43db73378da4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/v2/explain.go @@ -0,0 +1,97 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "encoding/json" + "errors" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/openapi" +) + +// PrintModelDescription prints the description of a specific model or dot path. +// If recursive, all components nested within the fields of the schema will be +// printed. +func PrintModelDescription( + fieldsPath []string, + w io.Writer, + client openapi.Client, + gvr schema.GroupVersionResource, + recursive bool, + outputFormat string, +) error { + generator := NewGenerator() + if err := registerBuiltinTemplates(generator); err != nil { + return fmt.Errorf("error parsing builtin templates. Please file a bug on GitHub: %w", err) + } + + return printModelDescriptionWithGenerator( + generator, fieldsPath, w, client, gvr, recursive, outputFormat) +} + +// Factored out for testability +func printModelDescriptionWithGenerator( + generator Generator, + fieldsPath []string, + w io.Writer, + client openapi.Client, + gvr schema.GroupVersionResource, + recursive bool, + outputFormat string, +) error { + paths, err := client.Paths() + + if err != nil { + return fmt.Errorf("failed to fetch list of groupVersions: %w", err) + } + + var resourcePath string + if len(gvr.Group) == 0 { + resourcePath = fmt.Sprintf("api/%s", gvr.Version) + } else { + resourcePath = fmt.Sprintf("apis/%s/%s", gvr.Group, gvr.Version) + } + + gv, exists := paths[resourcePath] + + if !exists { + return fmt.Errorf("couldn't find resource for \"%v\"", gvr) + } + + openAPISchemaBytes, err := gv.Schema(runtime.ContentTypeJSON) + if err != nil { + return fmt.Errorf("failed to fetch openapi schema for %s: %w", resourcePath, err) + } + + var parsedV3Schema map[string]interface{} + if err := json.Unmarshal(openAPISchemaBytes, &parsedV3Schema); err != nil { + return fmt.Errorf("failed to parse openapi schema for %s: %w", resourcePath, err) + } + + err = generator.Render(outputFormat, parsedV3Schema, gvr, fieldsPath, recursive, w) + + explainErr := explainError("") + if errors.As(err, &explainErr) { + return explainErr + } + + return err +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/v2/funcs.go b/vendor/k8s.io/kubectl/pkg/explain/v2/funcs.go new file mode 100644 index 000000000000..4c5e1c62be57 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/v2/funcs.go @@ -0,0 +1,240 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
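Picking up printModelDescriptionWithGenerator from the hunk above: the discovery path it builds is the one subtle branch — legacy core-group ("") resources are served under /api, every named group under /apis/&lt;group&gt;. A small sketch of that rule in isolation; resourcePathFor is an illustrative helper, not part of this PR:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// resourcePathFor mirrors the branch in printModelDescriptionWithGenerator:
// the legacy core group lives under api/<version>, named groups under
// apis/<group>/<version>.
func resourcePathFor(gvr schema.GroupVersionResource) string {
	if len(gvr.Group) == 0 {
		return fmt.Sprintf("api/%s", gvr.Version)
	}
	return fmt.Sprintf("apis/%s/%s", gvr.Group, gvr.Version)
}

func main() {
	fmt.Println(resourcePathFor(schema.GroupVersionResource{Version: "v1", Resource: "pods"}))
	// api/v1
	fmt.Println(resourcePathFor(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}))
	// apis/apps/v1
}
```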
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "text/template" + + "github.com/go-openapi/jsonreference" + "k8s.io/kubectl/pkg/util/term" +) + +type explainError string + +func (e explainError) Error() string { + return string(e) +} + +func WithBuiltinTemplateFuncs(tmpl *template.Template) *template.Template { + return tmpl.Funcs(map[string]interface{}{ + "throw": func(e string, args ...any) (string, error) { + errString := fmt.Sprintf(e, args...) + return "", explainError(errString) + }, + "toJson": func(obj any) (string, error) { + res, err := json.Marshal(obj) + return string(res), err + }, + "toPrettyJson": func(obj any) (string, error) { + res, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return "", err + } + return string(res), err + }, + "fail": func(message string) (string, error) { + return "", errors.New(message) + }, + "wrap": func(l int, s string) (string, error) { + buf := bytes.NewBuffer(nil) + writer := term.NewWordWrapWriter(buf, uint(l)) + _, err := writer.Write([]byte(s)) + if err != nil { + return "", err + } + return buf.String(), nil + }, + "split": func(s string, sep string) []string { + return strings.Split(s, sep) + }, + "join": func(sep string, strs ...string) string { + return strings.Join(strs, sep) + }, + "include": func(name string, data interface{}) (string, error) { + buf := bytes.NewBuffer(nil) + if err := tmpl.ExecuteTemplate(buf, name, data); err != nil { + return "", err + } + return buf.String(), nil + }, + "ternary": func(a, b any, condition bool) any { + if condition { + return a + } + return b + }, + "first": func(list any) (any, error) { + if list == nil { + return nil, errors.New("list is empty") + } + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, errors.New("list is empty") + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("first cannot be used on type: %T", list) + } + }, + "last": func(list any) (any, error) { + if list == nil { + return nil, errors.New("list is empty") + } + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, errors.New("list is empty") + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("last cannot be used on type: %T", list) + } + }, + "indent": func(amount int, str string) string { + pad := strings.Repeat(" ", amount) + return pad + strings.Replace(str, "\n", "\n"+pad, -1) + }, + "dict": func(keysAndValues ...any) (map[string]any, error) { + if len(keysAndValues)%2 != 0 { + return nil, errors.New("expected even # of arguments") + } + + res := map[string]any{} + for i := 0; i+1 < len(keysAndValues); i = i + 2 { + if key, ok := keysAndValues[i].(string); ok { + res[key] = keysAndValues[i+1] + } else { + return nil, fmt.Errorf("key of type %T is not a string as expected", key) + } + } + + return res, nil + }, + "contains": func(list any, value any) bool { + if list == nil { + 
return false + } + + val := reflect.ValueOf(list) + switch val.Kind() { + case reflect.Array: + case reflect.Slice: + for i := 0; i < val.Len(); i++ { + cur := val.Index(i) + if cur.CanInterface() && reflect.DeepEqual(cur.Interface(), value) { + return true + } + } + return false + default: + return false + } + return false + }, + "set": func(dict map[string]any, keysAndValues ...any) (any, error) { + if len(keysAndValues)%2 != 0 { + return nil, errors.New("expected even number of arguments") + } + + copyDict := make(map[string]any, len(dict)) + for k, v := range dict { + copyDict[k] = v + } + + for i := 0; i < len(keysAndValues); i += 2 { + key, ok := keysAndValues[i].(string) + if !ok { + return nil, errors.New("keys must be strings") + } + + copyDict[key] = keysAndValues[i+1] + } + + return copyDict, nil + }, + "list": func(values ...any) ([]any, error) { + return values, nil + }, + "add": func(value, operand int) int { + return value + operand + }, + "sub": func(value, operand int) int { + return value - operand + }, + "mul": func(value, operand int) int { + return value * operand + }, + "resolveRef": func(refAny any, document map[string]any) map[string]any { + refString, ok := refAny.(string) + if !ok { + // if passed nil, or wrong type just treat the same + // way as unresolved reference (makes for easier templates) + return nil + } + + // Resolve field path encoded by the ref + ref, err := jsonreference.New(refString) + if err != nil { + // Unrecognized ref format. + return nil + } + + if !ref.HasFragmentOnly { + // Downloading is not supported. Treat as not found + return nil + } + + fragment := ref.GetURL().Fragment + components := strings.Split(fragment, "/") + cur := document + + for _, k := range components { + if len(k) == 0 { + // first component is usually empty (#/components/) , etc + continue + } + + next, ok := cur[k].(map[string]any) + if !ok { + return nil + } + + cur = next + } + return cur + }, + }) +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/v2/generator.go b/vendor/k8s.io/kubectl/pkg/explain/v2/generator.go new file mode 100644 index 000000000000..cfcf2eaabece --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/v2/generator.go @@ -0,0 +1,102 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
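Stepping back to the template funcs that just closed above: resolveRef only follows fragment-only JSON references, walking the decoded document map one fragment component at a time and returning nil for anything it cannot resolve. A usage sketch, assuming WithBuiltinTemplateFuncs is driven directly from text/template; the document literal and template text are invented for illustration:

```go
package main

import (
	"os"
	"text/template"

	explainv2 "k8s.io/kubectl/pkg/explain/v2"
)

func main() {
	// A toy OpenAPI-style document holding a single named schema.
	doc := map[string]any{
		"components": map[string]any{
			"schemas": map[string]any{
				"Foo": map[string]any{"type": "object"},
			},
		},
	}

	// resolveRef walks "#/components/schemas/Foo" through the map;
	// toPrettyJson renders whatever subtree it lands on.
	tmpl := explainv2.WithBuiltinTemplateFuncs(template.New("demo"))
	tmpl = template.Must(tmpl.Parse(
		`{{ toPrettyJson (resolveRef "#/components/schemas/Foo" .) }}`))

	if err := tmpl.Execute(os.Stdout, doc); err != nil {
		panic(err)
	}
}
```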
+*/ + +package v2 + +import ( + "fmt" + "io" + "text/template" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type Generator interface { + AddTemplate(name string, contents string) error + + Render( + // Template to use for rendering + templateName string, + // Self-Contained OpenAPI Document Containing all schemas used by $ref + // Only OpenAPI V3 documents are supported + document map[string]interface{}, + // Resource within OpenAPI document for which to render explain schema + gvr schema.GroupVersionResource, + // Field path of child of resource to focus output onto + fieldSelector []string, + // Boolean indicating whether the fields should be rendered recursively/deeply + recursive bool, + // Output writer + writer io.Writer, + ) error +} + +type TemplateContext struct { + GVR schema.GroupVersionResource + Document map[string]interface{} + Recursive bool + FieldPath []string +} + +type generator struct { + templates map[string]*template.Template +} + +func NewGenerator() Generator { + return &generator{ + templates: make(map[string]*template.Template), + } +} + +func (g *generator) AddTemplate(name string, contents string) error { + compiled, err := WithBuiltinTemplateFuncs(template.New(name)).Parse(contents) + + if err != nil { + return err + } + + g.templates[name] = compiled + return nil +} + +func (g *generator) Render( + // Template to use for rendering + templateName string, + // Self-Contained OpenAPI Document Containing all schemas used by $ref + // Only OpenAPI V3 documents are supported + document map[string]interface{}, + // Resource within OpenAPI document for which to render explain schema + gvr schema.GroupVersionResource, + // Field path of child of resource to focus output onto + fieldSelector []string, + // Boolean indicating whether the fields should be rendered recursively/deeply + recursive bool, + // Output writer + writer io.Writer, +) error { + compiledTemplate, ok := g.templates[templateName] + if !ok { + return fmt.Errorf("unrecognized format: %s", templateName) + } + + err := compiledTemplate.Execute(writer, TemplateContext{ + Document: document, + Recursive: recursive, + FieldPath: fieldSelector, + GVR: gvr, + }) + return err +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/v2/template.go b/vendor/k8s.io/kubectl/pkg/explain/v2/template.go new file mode 100644 index 000000000000..45b1762eb7dc --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/v2/template.go @@ -0,0 +1,50 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2 + +import ( + "embed" + "path/filepath" + "strings" +) + +//go:embed templates/*.tmpl +var rawBuiltinTemplates embed.FS + +func registerBuiltinTemplates(gen Generator) error { + files, err := rawBuiltinTemplates.ReadDir("templates") + if err != nil { + return err + } + + for _, entry := range files { + contents, err := rawBuiltinTemplates.ReadFile("templates/" + entry.Name()) + if err != nil { + return err + } + + err = gen.AddTemplate( + strings.TrimSuffix(entry.Name(), filepath.Ext(entry.Name())), + string(contents)) + + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/explain/v2/templates/plaintext.tmpl b/vendor/k8s.io/kubectl/pkg/explain/v2/templates/plaintext.tmpl new file mode 100644 index 000000000000..971dfff1f22c --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/explain/v2/templates/plaintext.tmpl @@ -0,0 +1,339 @@ +{{- /* Determine if Path for requested GVR is at /api or /apis based on emptiness of group */ -}} +{{- $prefix := (ternary "/api" (join "" "/apis/" $.GVR.Group) (not $.GVR.Group)) -}} + +{{- /* Search both cluster-scoped and namespaced-scoped paths for the GVR to find its GVK */ -}} +{{- /* Also search for paths with {name} component in case the list path is missing */ -}} +{{- /* Looks for the following paths: */ -}} +{{- /* /apis/<group>/<version>/<resource> */ -}} +{{- /* /apis/<group>/<version>/<resource>/{name} */ -}} +{{- /* /apis/<group>/<version>/namespaces/{namespace}/<resource> */ -}} +{{- /* /apis/<group>/<version>/namespaces/{namespace}/<resource>/{name} */ -}} +{{- /* Also search for get verb paths in case list verb is missing */ -}} +{{- $clusterScopedSearchPath := join "/" $prefix $.GVR.Version $.GVR.Resource -}} +{{- $clusterScopedNameSearchPath := join "/" $prefix $.GVR.Version $.GVR.Resource "{name}" -}} +{{- $namespaceScopedSearchPath := join "/" $prefix $.GVR.Version "namespaces" "{namespace}" $.GVR.Resource -}} +{{- $namespaceScopedNameSearchPath := join "/" $prefix $.GVR.Version "namespaces" "{namespace}" $.GVR.Resource "{name}" -}} +{{- $gvk := "" -}} + +{{- /* Pull GVK from operation */ -}} +{{- range $index, $searchPath := (list $clusterScopedSearchPath $clusterScopedNameSearchPath $namespaceScopedSearchPath $namespaceScopedNameSearchPath) -}} + {{- with $resourcePathElement := index $.Document "paths" $searchPath -}} + {{- range $methodIndex, $method := (list "get" "post" "put" "patch" "delete") -}} + {{- with $resourceMethodPathElement := index $resourcePathElement $method -}} + {{- with $gvk = index $resourceMethodPathElement "x-kubernetes-group-version-kind" -}} + {{- break -}} + {{- end -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- with $gvk -}} + {{- if $gvk.group -}} + GROUP: {{ $gvk.group }}{{"\n" -}} + {{- end -}} + KIND: {{ $gvk.kind}}{{"\n" -}} + VERSION: {{ $gvk.version }}{{"\n" -}} + {{- "\n" -}} + + {{- with include "schema" (dict "gvk" $gvk "Document" $.Document "FieldPath" $.FieldPath "Recursive" $.Recursive) -}} + {{- .
-}} + {{- else -}} + {{- throw "error: GVK %v not found in OpenAPI schema" $gvk -}} + {{- end -}} +{{- else -}} + {{- throw "error: GVR (%v) not found in OpenAPI schema" $.GVR.String -}} +{{- end -}} +{{- "\n" -}} + +{{- /* +Finds a schema with the given GVK and prints its explain output or empty string +if GVK was not found + +Takes dictionary as argument with keys: + gvk: openapiv3 JSON schema + Document: entire doc + FieldPath: field path to follow + Recursive: print recursive +*/ -}} +{{- define "schema" -}} + {{- /* Find definition with this GVK by filtering out the components/schema with the given x-kubernetes-group-version-kind */ -}} + {{- range index $.Document "components" "schemas" -}} + {{- if contains (index . "x-kubernetes-group-version-kind") $.gvk -}} + {{- with include "output" (set $ "schema" .) -}} + {{- . -}} + {{- else -}} + {{- $fieldName := (index $.FieldPath (sub (len $.FieldPath) 1)) -}} + {{- throw "error: field \"%v\" does not exist" $fieldName}} + {{- end -}} + {{- break -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- /* +Follows FieldPath until the FieldPath is empty. Then prints field name and field +list of resultant schema. If field path is not found. Prints nothing. +Example output: + +FIELD: spec + +DESCRIPTION: +